Bug
Error # 179
Error trace
{ 20 typedef unsigned char __u8; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 29 typedef long long __s64; 30 typedef unsigned long long __u64; 15 typedef signed char s8; 16 typedef unsigned char u8; 18 typedef short s16; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 291 struct kernel_symbol { unsigned long value; const char *name; } ; 34 struct module ; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 91 typedef unsigned int uint; 102 typedef __s32 int32_t; 108 typedef __u32 uint32_t; 133 typedef unsigned long sector_t; 134 typedef unsigned long blkcnt_t; 152 typedef u64 dma_addr_t; 157 typedef unsigned int gfp_t; 158 typedef unsigned int fmode_t; 161 typedef u64 phys_addr_t; 166 typedef phys_addr_t resource_size_t; 176 struct __anonstruct_atomic_t_6 { int counter; } ; 176 typedef struct __anonstruct_atomic_t_6 atomic_t; 181 struct __anonstruct_atomic64_t_7 { long counter; } ; 181 typedef struct __anonstruct_atomic64_t_7 atomic64_t; 182 struct list_head { struct list_head *next; struct list_head *prev; } ; 187 struct hlist_node ; 187 struct hlist_head { struct hlist_node *first; } ; 191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 115 typedef void (*ctor_fn_t)(); 283 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ; 58 struct device ; 474 struct file_operations ; 486 struct completion ; 487 struct pt_regs ; 27 union __anonunion___u_9 { struct list_head *__val; char __c[1U]; } ; 65 union __anonunion___u_11 { struct list_head *__val; char __c[1U]; } ; 105 union __anonunion___u_13 { struct list_head *__val; char __c[1U]; } ; 202 union __anonunion___u_15 { struct list_head *__val; char __c[1U]; } ; 546 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 131 struct timespec ; 132 struct compat_timespec ; 133 struct pollfd ; 134 struct __anonstruct_futex_27 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ; 134 struct __anonstruct_nanosleep_28 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ; 134 struct __anonstruct_poll_29 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ; 134 union __anonunion____missing_field_name_26 { struct __anonstruct_futex_27 futex; struct __anonstruct_nanosleep_28 nanosleep; struct __anonstruct_poll_29 poll; } ; 134 struct 
restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_26 __annonCompField4; } ; 50 struct task_struct ; 39 struct page ; 14 struct __anonstruct_pfn_t_30 { u64 val; } ; 14 typedef struct __anonstruct_pfn_t_30 pfn_t; 26 struct mm_struct ; 288 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ; 66 struct __anonstruct____missing_field_name_32 { unsigned int a; unsigned int b; } ; 66 struct __anonstruct____missing_field_name_33 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 66 union __anonunion____missing_field_name_31 { struct __anonstruct____missing_field_name_32 __annonCompField5; struct __anonstruct____missing_field_name_33 __annonCompField6; } ; 66 struct desc_struct { union __anonunion____missing_field_name_31 __annonCompField7; } ; 13 typedef unsigned long pteval_t; 14 typedef unsigned long pmdval_t; 15 typedef unsigned long pudval_t; 16 typedef unsigned long pgdval_t; 17 typedef unsigned long pgprotval_t; 19 struct __anonstruct_pte_t_34 { pteval_t pte; } ; 19 typedef struct __anonstruct_pte_t_34 pte_t; 21 struct pgprot { pgprotval_t pgprot; } ; 256 typedef struct pgprot pgprot_t; 258 struct __anonstruct_pgd_t_35 { pgdval_t pgd; } ; 258 typedef struct __anonstruct_pgd_t_35 pgd_t; 276 struct __anonstruct_pud_t_36 { pudval_t pud; } ; 276 typedef struct __anonstruct_pud_t_36 pud_t; 297 struct __anonstruct_pmd_t_37 { pmdval_t pmd; } ; 297 typedef struct __anonstruct_pmd_t_37 pmd_t; 423 typedef struct page *pgtable_t; 434 struct file ; 445 struct seq_file ; 481 struct thread_struct ; 483 struct cpumask ; 20 struct qspinlock { atomic_t val; } ; 33 typedef struct qspinlock arch_spinlock_t; 34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ; 14 typedef struct qrwlock arch_rwlock_t; 247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ; 83 struct static_key { atomic_t enabled; } ; 26 union __anonunion___u_42 { int __val; char __c[1U]; } ; 38 union __anonunion___u_44 { int __val; char __c[1U]; } ; 23 typedef atomic64_t atomic_long_t; 359 struct cpumask { unsigned long bits[128U]; } ; 15 typedef struct cpumask cpumask_t; 657 typedef struct cpumask *cpumask_var_t; 22 struct tracepoint_func { void *func; void *data; int prio; } ; 28 struct tracepoint { const char *name; struct static_key key; int (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ; 233 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 26 struct __anonstruct____missing_field_name_61 { u64 rip; u64 rdp; } ; 26 struct __anonstruct____missing_field_name_62 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 26 union __anonunion____missing_field_name_60 { struct __anonstruct____missing_field_name_61 __annonCompField13; struct __anonstruct____missing_field_name_62 __annonCompField14; } ; 26 union __anonunion____missing_field_name_63 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union 
__anonunion____missing_field_name_60 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_63 __annonCompField16; } ; 66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ; 227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ; 233 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ; 254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ; 271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; union fpregs_state state; } ; 181 struct seq_operations ; 415 struct perf_event ; 420 struct __anonstruct_mm_segment_t_75 { unsigned long seg; } ; 420 typedef struct __anonstruct_mm_segment_t_75 mm_segment_t; 421 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; u32 status; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ; 48 struct thread_info { unsigned long flags; } ; 33 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 28 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ; 593 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct____missing_field_name_77 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion____missing_field_name_76 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_77 __annonCompField19; } ; 33 struct spinlock { union __anonunion____missing_field_name_76 __annonCompField20; } ; 76 typedef struct spinlock spinlock_t; 23 struct __anonstruct_rwlock_t_78 { arch_rwlock_t raw_lock; unsigned 
int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 23 typedef struct __anonstruct_rwlock_t_78 rwlock_t; 408 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ; 52 typedef struct seqcount seqcount_t; 601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 19 struct timezone { int tz_minuteswest; int tz_dsttime; } ; 7 typedef __s64 time64_t; 83 struct user_namespace ; 22 struct __anonstruct_kuid_t_94 { uid_t val; } ; 22 typedef struct __anonstruct_kuid_t_94 kuid_t; 27 struct __anonstruct_kgid_t_95 { gid_t val; } ; 27 typedef struct __anonstruct_kgid_t_95 kgid_t; 139 struct kstat { u32 result_mask; umode_t mode; unsigned int nlink; uint32_t blksize; u64 attributes; u64 ino; dev_t dev; dev_t rdev; kuid_t uid; kgid_t gid; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; struct timespec btime; u64 blocks; } ; 48 struct vm_area_struct ; 13 struct __wait_queue ; 13 typedef struct __wait_queue wait_queue_t; 16 struct __wait_queue { unsigned int flags; void *private; int (*func)(wait_queue_t *, unsigned int, int, void *); struct list_head task_list; } ; 39 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 44 typedef struct __wait_queue_head wait_queue_head_t; 97 struct __anonstruct_nodemask_t_96 { unsigned long bits[16U]; } ; 97 typedef struct __anonstruct_nodemask_t_96 nodemask_t; 247 typedef unsigned int isolate_mode_t; 13 struct optimistic_spin_queue { atomic_t tail; } ; 39 struct ww_acquire_ctx ; 40 struct mutex { atomic_long_t owner; spinlock_t wait_lock; struct optimistic_spin_queue osq; struct list_head wait_list; void *magic; struct lockdep_map dep_map; } ; 72 struct mutex_waiter { struct list_head list; struct task_struct *task; struct ww_acquire_ctx *ww_ctx; void *magic; } ; 229 struct rw_semaphore ; 230 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ; 28 typedef s64 ktime_t; 365 struct srcu_struct ; 1109 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; struct lockdep_map lockdep_map; } ; 211 struct hrtimer ; 212 enum hrtimer_restart ; 235 struct workqueue_struct ; 236 struct work_struct ; 54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 153 struct execute_work { struct work_struct work; } ; 642 struct srcu_array { unsigned long lock_count[2U]; unsigned long unlock_count[2U]; } ; 40 struct rcu_batch { struct callback_head *head; struct callback_head **tail; } ; 44 struct srcu_struct { unsigned long completed; struct srcu_array *per_cpu_ref; spinlock_t queue_lock; bool running; struct rcu_batch batch_queue; struct rcu_batch batch_check0; struct rcu_batch batch_check1; struct rcu_batch batch_done; struct delayed_work work; struct lockdep_map dep_map; } ; 217 struct resource ; 68 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; unsigned long desc; struct resource *parent; struct resource *sibling; struct resource *child; } ; 236 struct pci_dev ; 144 struct pci_bus ; 38 struct ldt_struct ; 38 struct vdso_image ; 38 struct __anonstruct_mm_context_t_161 { struct ldt_struct *ldt; unsigned short 
ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; u16 pkey_allocation_map; s16 execute_only_pkey; void *bd_addr; } ; 38 typedef struct __anonstruct_mm_context_t_161 mm_context_t; 34 struct bio_vec ; 1266 struct llist_node ; 69 struct llist_node { struct llist_node *next; } ; 17 struct call_single_data { struct llist_node llist; void (*func)(void *); void *info; unsigned int flags; } ; 551 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 41 struct rb_root { struct rb_node *rb_node; } ; 835 struct nsproxy ; 37 struct cred ; 19 struct vmacache { u32 seqnum; struct vm_area_struct *vmas[4U]; } ; 41 struct task_rss_stat { int events; int count[4U]; } ; 49 struct mm_rss_stat { atomic_long_t count[4U]; } ; 54 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 61 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ; 85 struct completion { unsigned int done; wait_queue_head_t wait; } ; 108 struct inode ; 58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 73 struct __anonstruct____missing_field_name_215 { struct arch_uprobe_task autask; unsigned long vaddr; } ; 73 struct __anonstruct____missing_field_name_216 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 73 union __anonunion____missing_field_name_214 { struct __anonstruct____missing_field_name_215 __annonCompField35; struct __anonstruct____missing_field_name_216 __annonCompField36; } ; 73 struct uprobe ; 73 struct return_instance ; 73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_214 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 95 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ; 111 struct xol_area ; 112 struct uprobes_state { struct xol_area *xol_area; } ; 151 struct address_space ; 152 struct mem_cgroup ; 153 union __anonunion____missing_field_name_217 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ; 153 union __anonunion____missing_field_name_218 { unsigned long index; void *freelist; } ; 153 struct __anonstruct____missing_field_name_222 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 153 union __anonunion____missing_field_name_221 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_222 __annonCompField40; int units; } ; 153 struct __anonstruct____missing_field_name_220 { union __anonunion____missing_field_name_221 __annonCompField41; atomic_t _refcount; } ; 153 union __anonunion____missing_field_name_219 { unsigned long counters; struct __anonstruct____missing_field_name_220 __annonCompField42; } ; 153 struct dev_pagemap ; 153 struct __anonstruct____missing_field_name_224 { struct page *next; int pages; int pobjects; } ; 153 struct __anonstruct____missing_field_name_225 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ; 153 struct __anonstruct____missing_field_name_226 { unsigned long __pad; pgtable_t pmd_huge_pte; } ; 153 union __anonunion____missing_field_name_223 { struct list_head lru; 
struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_224 __annonCompField44; struct callback_head callback_head; struct __anonstruct____missing_field_name_225 __annonCompField45; struct __anonstruct____missing_field_name_226 __annonCompField46; } ; 153 struct kmem_cache ; 153 union __anonunion____missing_field_name_227 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ; 153 struct page { unsigned long flags; union __anonunion____missing_field_name_217 __annonCompField38; union __anonunion____missing_field_name_218 __annonCompField39; union __anonunion____missing_field_name_219 __annonCompField43; union __anonunion____missing_field_name_223 __annonCompField47; union __anonunion____missing_field_name_227 __annonCompField48; struct mem_cgroup *mem_cgroup; } ; 266 struct userfaultfd_ctx ; 266 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ; 273 struct __anonstruct_shared_228 { struct rb_node rb; unsigned long rb_subtree_last; } ; 273 struct anon_vma ; 273 struct vm_operations_struct ; 273 struct mempolicy ; 273 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_228 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ; 346 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 351 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 357 struct kioctx_table ; 358 struct linux_binfmt ; 358 struct mmu_notifier_mm ; 358 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct user_namespace *user_ns; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ; 544 struct vm_fault ; 598 struct 
vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 18 typedef __u64 Elf64_Off; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ; 235 typedef struct elf64_hdr Elf64_Ehdr; 314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ; 326 typedef struct elf64_shdr Elf64_Shdr; 65 struct radix_tree_root ; 65 union __anonunion____missing_field_name_233 { struct list_head private_list; struct callback_head callback_head; } ; 65 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned char count; unsigned char exceptional; struct radix_tree_node *parent; struct radix_tree_root *root; union __anonunion____missing_field_name_233 __annonCompField49; void *slots[64U]; unsigned long tags[3U][1U]; } ; 107 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 176 struct ida { struct radix_tree_root ida_rt; } ; 216 struct dentry ; 217 struct iattr ; 218 struct super_block ; 219 struct file_system_type ; 220 struct kernfs_open_node ; 221 struct kernfs_iattrs ; 245 struct kernfs_root ; 245 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 86 struct kernfs_node ; 86 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 90 struct kernfs_ops ; 90 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ; 97 union __anonunion____missing_field_name_242 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 97 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_242 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 139 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ; 158 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 174 struct kernfs_open_file { 
struct kernfs_node *kn; struct file *file; struct seq_file *seq_file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; bool released; const struct vm_operations_struct *vm_ops; } ; 194 struct kernfs_ops { int (*open)(struct kernfs_open_file *); void (*release)(struct kernfs_open_file *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 521 struct sock ; 522 struct kobject ; 523 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 529 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 59 struct bin_attribute ; 60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 530 struct refcount_struct { atomic_t refs; } ; 11 typedef struct refcount_struct refcount_t; 41 struct kref { refcount_t refcount; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 223 struct kernel_param ; 228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, 
const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 62 struct kparam_string ; 62 struct kparam_array ; 62 union __anonunion____missing_field_name_245 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_245 __annonCompField51; } ; 83 struct kparam_string { unsigned int maxlen; char *string; } ; 89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ; 470 struct latch_tree_node { struct rb_node node[2U]; } ; 211 struct mod_arch_specific { } ; 38 struct exception_table_entry ; 39 struct module_param_attrs ; 39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 49 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 276 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 283 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ; 288 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ; 304 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ; 318 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ; 326 struct module_sect_attrs ; 326 struct module_notes_attrs ; 326 struct trace_event_call ; 326 struct trace_enum_map ; 326 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const s32 *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const s32 *gpl_crcs; const struct kernel_symbol *unused_syms; const s32 *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const s32 *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const s32 *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned long taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct 
trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 796 struct klist_node ; 797 struct klist { spinlock_t k_lock; struct list_head k_list; void (*get)(struct klist_node *); void (*put)(struct klist_node *); } ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 93 struct hlist_bl_node ; 93 struct hlist_bl_head { struct hlist_bl_node *first; } ; 36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ; 114 struct __anonstruct____missing_field_name_299 { spinlock_t lock; int count; } ; 114 union __anonunion____missing_field_name_298 { struct __anonstruct____missing_field_name_299 __annonCompField52; } ; 114 struct lockref { union __anonunion____missing_field_name_298 __annonCompField53; } ; 77 struct path ; 78 struct vfsmount ; 79 struct __anonstruct____missing_field_name_301 { u32 hash; u32 len; } ; 79 union __anonunion____missing_field_name_300 { struct __anonstruct____missing_field_name_301 __annonCompField54; u64 hash_len; } ; 79 struct qstr { union __anonunion____missing_field_name_300 __annonCompField55; const unsigned char *name; } ; 66 struct dentry_operations ; 66 union __anonunion____missing_field_name_302 { struct list_head d_lru; wait_queue_head_t *d_wait; } ; 66 union __anonunion_d_u_303 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ; 66 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_302 __annonCompField56; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_303 d_u; } ; 122 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(const struct path *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ; 593 struct path { struct vfsmount *mnt; struct dentry *dentry; } ; 19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ; 27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ; 80 struct list_lru_one { struct list_head list; long nr_items; } ; 32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ; 37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ; 47 struct list_lru { struct list_lru_node *node; struct list_head list; } ; 189 enum pid_type { PIDTYPE_PID 
= 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; 196 struct pid_namespace ; 196 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ; 38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; 44 struct rcuwait { struct task_struct *task; } ; 32 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ; 38 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ; 66 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *read_count; struct rw_semaphore rw_sem; struct rcuwait writer; int readers_block; } ; 144 struct delayed_call { void (*fn)(void *); void *arg; } ; 283 struct backing_dev_info ; 284 struct bdi_writeback ; 285 struct bio ; 286 struct export_operations ; 287 struct hd_geometry ; 289 struct kiocb ; 290 struct pipe_inode_info ; 291 struct poll_table_struct ; 292 struct kstatfs ; 293 struct swap_info_struct ; 294 struct iov_iter ; 295 struct fscrypt_info ; 296 struct fscrypt_operations ; 76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ; 210 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ; 213 struct dquot ; 214 struct kqid ; 19 typedef __kernel_uid32_t projid_t; 23 struct __anonstruct_kprojid_t_305 { projid_t val; } ; 23 typedef struct __anonstruct_kprojid_t_305 kprojid_t; 181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; 66 typedef long long qsize_t; 67 union __anonunion____missing_field_name_306 { kuid_t uid; kgid_t gid; kprojid_t projid; } ; 67 struct kqid { union __anonunion____missing_field_name_306 __annonCompField57; enum quota_type type; } ; 194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ; 216 struct quota_format_type ; 217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ; 282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ; 309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct 
super_block *, struct kqid *); } ; 321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ; 361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ; 407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ; 418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ; 431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, const struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ; 447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ; 511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ; 540 struct writeback_control ; 541 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ; 317 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct 
page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ; 376 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; gfp_t gfp_mask; struct list_head private_list; void *private_data; } ; 398 struct request_queue ; 399 struct hd_struct ; 399 struct gendisk ; 399 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct backing_dev_info *bd_bdi; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ; 515 struct posix_acl ; 542 struct inode_operations ; 542 union __anonunion____missing_field_name_311 { const unsigned int i_nlink; unsigned int __i_nlink; } ; 542 union __anonunion____missing_field_name_312 { struct hlist_head i_dentry; struct callback_head i_rcu; } ; 542 struct file_lock_context ; 542 struct cdev ; 542 union __anonunion____missing_field_name_313 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ; 542 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_311 __annonCompField58; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_312 __annonCompField59; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_313 __annonCompField60; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ; 803 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int 
signum; } ; 811 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ; 834 union __anonunion_f_u_314 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ; 834 struct file { union __anonunion_f_u_314 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ; 919 typedef void *fl_owner_t; 920 struct file_lock ; 921 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ; 927 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ; 954 struct nlm_lockowner ; 955 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ; 14 struct nfs4_lock_state ; 15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ; 19 struct fasync_struct ; 19 struct __anonstruct_afs_316 { struct list_head link; int state; } ; 19 union __anonunion_fl_u_315 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_316 afs; } ; 19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_315 fl_u; } ; 1007 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ; 1074 struct files_struct ; 1227 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ; 1262 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ; 1292 struct super_operations ; 1292 struct xattr_handler ; 1292 struct mtd_info ; 1292 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct 
list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ; 1579 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ; 1592 struct dir_context ; 1617 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ; 1623 struct block_device_operations ; 1624 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ; 1692 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, 
umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(const struct path *, struct kstat *, u32 , unsigned int); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ; 1771 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ; 2014 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ; 3219 struct assoc_array_ptr ; 3219 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ; 31 typedef int32_t key_serial_t; 34 typedef uint32_t key_perm_t; 35 struct key ; 36 struct user_struct ; 37 struct signal_struct ; 38 struct key_type ; 42 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ; 91 union key_payload { void *rcu_data0; void *data[4U]; } ; 128 union __anonunion____missing_field_name_317 { struct list_head graveyard_link; struct rb_node serial_node; } ; 128 struct key_user ; 128 union __anonunion____missing_field_name_318 { time_t expiry; time_t revoked_at; } ; 128 struct 
__anonstruct____missing_field_name_320 { struct key_type *type; char *description; } ; 128 union __anonunion____missing_field_name_319 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_320 __annonCompField63; } ; 128 struct __anonstruct____missing_field_name_322 { struct list_head name_link; struct assoc_array keys; } ; 128 union __anonunion____missing_field_name_321 { union key_payload payload; struct __anonstruct____missing_field_name_322 __annonCompField65; int reject_error; } ; 128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_317 __annonCompField61; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_318 __annonCompField62; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_319 __annonCompField64; union __anonunion____missing_field_name_321 __annonCompField66; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ; 380 struct audit_context ; 26 struct sem_undo_list ; 26 struct sysv_sem { struct sem_undo_list *undo_list; } ; 26 struct sysv_shm { struct list_head shm_clist; } ; 12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ; 84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ; 299 struct timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; } ; 113 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ; 146 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 43 struct seccomp_filter ; 44 struct seccomp { int mode; struct seccomp_filter *filter; } ; 11 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ; 24 struct __anonstruct_sigset_t_323 { unsigned long sig[1U]; } ; 24 typedef struct __anonstruct_sigset_t_323 sigset_t; 25 struct siginfo ; 38 union sigval { int sival_int; void *sival_ptr; } ; 10 typedef union sigval sigval_t; 11 struct __anonstruct__kill_325 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ; 11 struct __anonstruct__timer_326 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ; 11 struct __anonstruct__rt_327 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ; 11 struct __anonstruct__sigchld_328 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ; 11 struct __anonstruct__addr_bnd_331 { void *_lower; void *_upper; } ; 11 union 
__anonunion____missing_field_name_330 { struct __anonstruct__addr_bnd_331 _addr_bnd; __u32 _pkey; } ; 11 struct __anonstruct__sigfault_329 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_330 __annonCompField67; } ; 11 struct __anonstruct__sigpoll_332 { long _band; int _fd; } ; 11 struct __anonstruct__sigsys_333 { void *_call_addr; int _syscall; unsigned int _arch; } ; 11 union __anonunion__sifields_324 { int _pad[28U]; struct __anonstruct__kill_325 _kill; struct __anonstruct__timer_326 _timer; struct __anonstruct__rt_327 _rt; struct __anonstruct__sigchld_328 _sigchld; struct __anonstruct__sigfault_329 _sigfault; struct __anonstruct__sigpoll_332 _sigpoll; struct __anonstruct__sigsys_333 _sigsys; } ; 11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_324 _sifields; } ; 118 typedef struct siginfo siginfo_t; 21 struct sigpending { struct list_head list; sigset_t signal; } ; 65 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ; 45 struct bio_list ; 46 struct blk_plug ; 47 struct cfs_rq ; 48 struct fs_struct ; 49 struct futex_pi_state ; 50 struct io_context ; 51 struct nameidata ; 52 struct perf_event_context ; 54 struct reclaim_state ; 55 struct robust_list_head ; 58 struct sighand_struct ; 59 struct task_delay_info ; 60 struct task_group ; 187 struct prev_cputime { u64 utime; u64 stime; raw_spinlock_t lock; } ; 203 struct task_cputime { u64 utime; u64 stime; unsigned long long sum_exec_runtime; } ; 220 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ; 244 struct load_weight { unsigned long weight; u32 inv_weight; } ; 261 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ; 322 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ; 357 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ; 393 struct rt_rq ; 393 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ; 411 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ; 478 struct wake_q_node { struct wake_q_node *next; } ; 482 struct sched_class ; 482 struct rt_mutex_waiter ; 482 struct 
css_set ; 482 struct compat_robust_list_head ; 482 struct numa_group ; 482 struct kcov ; 482 struct task_struct { struct thread_info thread_info; volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; struct vmacache vmacache; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; u64 utime; u64 stime; u64 gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *ptracer_cred; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 
curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; u64 acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; int closid; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; atomic_t stack_refcount; struct thread_struct thread; } ; 1562 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ; 60 struct group_info { atomic_t usage; int ngroups; kgid_t gid[0U]; } ; 86 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ; 369 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; 
const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ; 30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ; 222 struct pinctrl ; 223 struct pinctrl_state ; 200 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 315 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 322 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 330 struct wakeup_source ; 331 struct wake_irq ; 332 struct pm_domain_data ; 333 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ; 551 struct dev_pm_qos ; 551 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool in_dpm_list; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; unsigned int links_count; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ; 613 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ; 76 struct dev_archdata { void *iommu; } ; 8 struct dma_map_ops ; 21 struct device_private ; 22 struct 
device_driver ; 23 struct driver_private ; 24 struct class ; 25 struct subsys_private ; 26 struct bus_type ; 27 struct device_node ; 28 struct fwnode_handle ; 29 struct iommu_ops ; 30 struct iommu_group ; 31 struct iommu_fwspec ; 62 struct device_attribute ; 62 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); int (*num_vf)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ; 147 struct device_type ; 206 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; 212 struct of_device_id ; 212 struct acpi_device_id ; 212 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ; 362 struct class_attribute ; 362 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **class_groups; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ; 457 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ; 527 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ; 555 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ; 727 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ; 790 enum dl_dev_state { DL_DEV_NO_DRIVER = 0, DL_DEV_PROBING = 1, DL_DEV_DRIVER_BOUND = 2, DL_DEV_UNBINDING = 3 } ; 797 struct dev_links_info { struct list_head suppliers; struct list_head consumers; enum dl_dev_state status; } ; 817 struct irq_domain ; 817 struct dma_coherent_mem ; 817 struct cma ; 817 struct device { struct device *parent; struct 
device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_links_info links; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; const struct dma_map_ops *dma_ops; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; struct iommu_fwspec *iommu_fwspec; bool offline_disabled; bool offline; } ; 976 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ; 13 typedef unsigned long kernel_ulong_t; 14 struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; } ; 187 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ; 230 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 70 struct hotplug_slot ; 70 struct pci_slot { struct pci_bus *bus; struct list_head list; struct hotplug_slot *hotplug; unsigned char number; struct kobject kobj; } ; 108 typedef int pci_power_t; 135 typedef unsigned int pci_channel_state_t; 136 enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ; 161 typedef unsigned short pci_dev_flags_t; 188 typedef unsigned short pci_bus_flags_t; 247 struct pcie_link_state ; 248 struct pci_vpd ; 249 struct pci_sriov ; 251 struct proc_dir_entry ; 251 struct pci_driver ; 251 union __anonunion____missing_field_name_352 { struct pci_sriov *sriov; struct pci_dev *physfn; } ; 251 struct pci_dev { struct list_head bus_list; struct pci_bus *bus; struct pci_bus *subordinate; void *sysdata; struct proc_dir_entry *procent; struct pci_slot *slot; unsigned int devfn; unsigned short vendor; unsigned short device; unsigned short subsystem_vendor; unsigned short subsystem_device; unsigned int class; u8 revision; u8 hdr_type; u16 aer_cap; u8 pcie_cap; u8 msi_cap; u8 msix_cap; unsigned char pcie_mpss; u8 rom_base_reg; u8 pin; u16 pcie_flags_reg; unsigned long *dma_alias_mask; struct pci_driver *driver; u64 dma_mask; struct device_dma_parameters dma_parms; pci_power_t current_state; u8 pm_cap; unsigned char pme_support; unsigned char pme_interrupt; unsigned char pme_poll; unsigned char d1_support; unsigned char d2_support; unsigned char no_d1d2; unsigned char no_d3cold; unsigned char bridge_d3; unsigned char d3cold_allowed; unsigned char mmio_always_on; unsigned char wakeup_prepared; unsigned char runtime_d3cold; unsigned char ignore_hotplug; unsigned char 
hotplug_user_indicators; unsigned int d3_delay; unsigned int d3cold_delay; struct pcie_link_state *link_state; pci_channel_state_t error_state; struct device dev; int cfg_size; unsigned int irq; struct resource resource[17U]; bool match_driver; unsigned char transparent; unsigned char multifunction; unsigned char is_added; unsigned char is_busmaster; unsigned char no_msi; unsigned char no_64bit_msi; unsigned char block_cfg_access; unsigned char broken_parity_status; unsigned char irq_reroute_variant; unsigned char msi_enabled; unsigned char msix_enabled; unsigned char ari_enabled; unsigned char ats_enabled; unsigned char is_managed; unsigned char needs_freset; unsigned char state_saved; unsigned char is_physfn; unsigned char is_virtfn; unsigned char reset_fn; unsigned char is_hotplug_bridge; unsigned char __aer_firmware_first_valid; unsigned char __aer_firmware_first; unsigned char broken_intx_masking; unsigned char io_window_1k; unsigned char irq_managed; unsigned char has_secondary_link; unsigned char non_compliant_bars; pci_dev_flags_t dev_flags; atomic_t enable_cnt; u32 saved_config_space[16U]; struct hlist_head saved_cap_space; struct bin_attribute *rom_attr; int rom_attr_enabled; struct bin_attribute *res_attr[17U]; struct bin_attribute *res_attr_wc[17U]; unsigned char ptm_root; unsigned char ptm_enabled; u8 ptm_granularity; const struct attribute_group **msi_irq_groups; struct pci_vpd *vpd; union __anonunion____missing_field_name_352 __annonCompField72; u16 ats_cap; u8 ats_stu; atomic_t ats_ref_cnt; phys_addr_t rom; size_t romlen; char *driver_override; } ; 419 struct pci_ops ; 419 struct msi_controller ; 482 struct pci_bus { struct list_head node; struct pci_bus *parent; struct list_head children; struct list_head devices; struct pci_dev *self; struct list_head slots; struct resource *resource[4U]; struct list_head resources; struct resource busn_res; struct pci_ops *ops; struct msi_controller *msi; void *sysdata; struct proc_dir_entry *procdir; unsigned char number; unsigned char primary; unsigned char max_bus_speed; unsigned char cur_bus_speed; char name[48U]; unsigned short bridge_ctl; pci_bus_flags_t bus_flags; struct device *bridge; struct device dev; struct bin_attribute *legacy_io; struct bin_attribute *legacy_mem; unsigned char is_added; } ; 606 struct pci_ops { int (*add_bus)(struct pci_bus *); void (*remove_bus)(struct pci_bus *); void * (*map_bus)(struct pci_bus *, unsigned int, int); int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); int (*write)(struct pci_bus *, unsigned int, int, int, u32 ); } ; 636 struct pci_dynids { spinlock_t lock; struct list_head list; } ; 650 typedef unsigned int pci_ers_result_t; 660 struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev *, enum pci_channel_state ); pci_ers_result_t (*mmio_enabled)(struct pci_dev *); pci_ers_result_t (*slot_reset)(struct pci_dev *); void (*reset_notify)(struct pci_dev *, bool ); void (*resume)(struct pci_dev *); } ; 690 struct pci_driver { struct list_head node; const char *name; const struct pci_device_id *id_table; int (*probe)(struct pci_dev *, const struct pci_device_id *); void (*remove)(struct pci_dev *); int (*suspend)(struct pci_dev *, pm_message_t ); int (*suspend_late)(struct pci_dev *, pm_message_t ); int (*resume_early)(struct pci_dev *); int (*resume)(struct pci_dev *); void (*shutdown)(struct pci_dev *); int (*sriov_configure)(struct pci_dev *, int); const struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; } ; 1270 
struct percpu_ref ; 55 typedef void percpu_ref_func_t(struct percpu_ref *); 68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ; 277 struct vm_fault { struct vm_area_struct *vma; unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; unsigned long address; pmd_t *pmd; pud_t *pud; pte_t orig_pte; struct page *cow_page; struct mem_cgroup *memcg; struct page *page; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ; 340 enum page_entry_size { PE_SIZE_PTE = 0, PE_SIZE_PMD = 1, PE_SIZE_PUD = 2 } ; 346 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_fault *); int (*huge_fault)(struct vm_fault *, enum page_entry_size ); void (*map_pages)(struct vm_fault *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_fault *); int (*pfn_mkwrite)(struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ; 2513 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ; 21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ; 96 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; 158 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); dma_addr_t (*map_resource)(struct device *, phys_addr_t , size_t , enum dma_data_direction , unsigned long); void (*unmap_resource)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ; 134 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; 16 typedef enum irqreturn irqreturn_t; 133 struct exception_table_entry { int insn; int fixup; int handler; } ; 82 struct disk_stats { unsigned long sectors[2U]; unsigned 
long ios[2U]; unsigned long merges[2U]; unsigned long ticks[2U]; unsigned long io_ticks; unsigned long time_in_queue; } ; 91 struct partition_meta_info { char uuid[37U]; u8 volname[64U]; } ; 103 struct hd_struct { sector_t start_sect; sector_t nr_sects; seqcount_t nr_sects_seq; sector_t alignment_offset; unsigned int discard_alignment; struct device __dev; struct kobject *holder_dir; int policy; int partno; struct partition_meta_info *info; int make_it_fail; unsigned long stamp; atomic_t in_flight[2U]; struct disk_stats *dkstats; struct percpu_ref ref; struct callback_head callback_head; } ; 137 struct disk_part_tbl { struct callback_head callback_head; int len; struct hd_struct *last_lookup; struct hd_struct *part[]; } ; 155 struct disk_events ; 156 struct badblocks ; 157 struct blk_integrity_profile ; 157 struct blk_integrity { struct blk_integrity_profile *profile; unsigned char flags; unsigned char tuple_size; unsigned char interval_exp; unsigned char tag_size; } ; 168 struct disk_devt { atomic_t count; void (*release)(struct disk_devt *); } ; 177 struct timer_rand_state ; 177 struct gendisk { int major; int first_minor; int minors; struct disk_devt *disk_devt; char disk_name[32U]; char * (*devnode)(struct gendisk *, umode_t *); unsigned int events; unsigned int async_events; struct disk_part_tbl *part_tbl; struct hd_struct part0; const struct block_device_operations *fops; struct request_queue *queue; void *private_data; int flags; struct kobject *slave_dir; struct timer_rand_state *random; atomic_t sync_io; struct disk_events *ev; struct kobject integrity_kobj; int node_id; struct badblocks *bb; } ; 72 struct fprop_local_percpu { struct percpu_counter events; unsigned int period; raw_spinlock_t lock; } ; 33 typedef int congested_fn(void *, int); 42 struct bdi_writeback_congested { unsigned long state; atomic_t refcnt; struct backing_dev_info *bdi; int blkcg_id; struct rb_node rb_node; } ; 61 struct cgroup_subsys_state ; 61 union __anonunion____missing_field_name_374 { struct work_struct release_work; struct callback_head rcu; } ; 61 struct bdi_writeback { struct backing_dev_info *bdi; unsigned long state; unsigned long last_old_flush; struct list_head b_dirty; struct list_head b_io; struct list_head b_more_io; struct list_head b_dirty_time; spinlock_t list_lock; struct percpu_counter stat[4U]; struct bdi_writeback_congested *congested; unsigned long bw_time_stamp; unsigned long dirtied_stamp; unsigned long written_stamp; unsigned long write_bandwidth; unsigned long avg_write_bandwidth; unsigned long dirty_ratelimit; unsigned long balanced_dirty_ratelimit; struct fprop_local_percpu completions; int dirty_exceeded; spinlock_t work_lock; struct list_head work_list; struct delayed_work dwork; unsigned long dirty_sleep; struct list_head bdi_node; struct percpu_ref refcnt; struct fprop_local_percpu memcg_completions; struct cgroup_subsys_state *memcg_css; struct cgroup_subsys_state *blkcg_css; struct list_head memcg_node; struct list_head blkcg_node; union __anonunion____missing_field_name_374 __annonCompField81; } ; 137 struct backing_dev_info { struct list_head bdi_list; unsigned long ra_pages; unsigned long io_pages; congested_fn *congested_fn; void *congested_data; char *name; struct kref refcnt; unsigned int capabilities; unsigned int min_ratio; unsigned int max_ratio; unsigned int max_prop_frac; atomic_long_t tot_write_bandwidth; struct bdi_writeback wb; struct list_head wb_list; struct radix_tree_root cgwb_tree; struct rb_root cgwb_congested_tree; atomic_t usage_cnt; 
wait_queue_head_t wb_waitq; struct device *dev; struct device *owner; struct timer_list laptop_mode_wb_timer; struct dentry *debug_dir; struct dentry *debug_stats; } ; 12 typedef void * mempool_alloc_t(gfp_t , void *); 13 typedef void mempool_free_t(void *, void *); 14 struct mempool_s { spinlock_t lock; int min_nr; int curr_nr; void **elements; void *pool_data; mempool_alloc_t *alloc; mempool_free_t *free; wait_queue_head_t wait; } ; 25 typedef struct mempool_s mempool_t; 79 union __anonunion____missing_field_name_375 { struct list_head q_node; struct kmem_cache *__rcu_icq_cache; } ; 79 union __anonunion____missing_field_name_376 { struct hlist_node ioc_node; struct callback_head __rcu_head; } ; 79 struct io_cq { struct request_queue *q; struct io_context *ioc; union __anonunion____missing_field_name_375 __annonCompField82; union __anonunion____missing_field_name_376 __annonCompField83; unsigned int flags; } ; 92 struct io_context { atomic_long_t refcount; atomic_t active_ref; atomic_t nr_tasks; spinlock_t lock; unsigned short ioprio; int nr_batch_requests; unsigned long last_waited; struct radix_tree_root icq_tree; struct io_cq *icq_hint; struct hlist_head icq_list; struct work_struct release_work; } ; 77 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ; 34 struct bvec_iter { sector_t bi_sector; unsigned int bi_size; unsigned int bi_idx; unsigned int bi_bvec_done; } ; 84 struct bio_set ; 85 struct bio_integrity_payload ; 18 typedef void bio_end_io_t(struct bio *); 19 union __anonunion____missing_field_name_377 { struct bio_integrity_payload *bi_integrity; } ; 19 struct bio { struct bio *bi_next; struct block_device *bi_bdev; int bi_error; unsigned int bi_opf; unsigned short bi_flags; unsigned short bi_ioprio; struct bvec_iter bi_iter; unsigned int bi_phys_segments; unsigned int bi_seg_front_size; unsigned int bi_seg_back_size; atomic_t __bi_remaining; bio_end_io_t *bi_end_io; void *bi_private; struct io_context *bi_ioc; struct cgroup_subsys_state *bi_css; union __anonunion____missing_field_name_377 __annonCompField84; unsigned short bi_vcnt; unsigned short bi_max_vecs; atomic_t __bi_cnt; struct bio_vec *bi_io_vec; struct bio_set *bi_pool; struct bio_vec bi_inline_vecs[0U]; } ; 250 typedef unsigned int blk_qc_t; 285 struct blk_issue_stat { u64 time; } ; 289 struct blk_rq_stat { s64 mean; u64 min; u64 max; s32 nr_samples; s32 nr_batch; u64 batch; s64 time; } ; 303 struct bio_integrity_payload { struct bio *bip_bio; struct bvec_iter bip_iter; bio_end_io_t *bip_end_io; unsigned short bip_slab; unsigned short bip_vcnt; unsigned short bip_max_vcnt; unsigned short bip_flags; struct work_struct bip_work; struct bio_vec *bip_vec; struct bio_vec bip_inline_vecs[0U]; } ; 544 struct bio_list { struct bio *head; struct bio *tail; } ; 676 struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; mempool_t *bio_pool; mempool_t *bvec_pool; mempool_t *bio_integrity_pool; mempool_t *bvec_integrity_pool; spinlock_t rescue_lock; struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; } ; 64 struct bsg_class_device { struct device *class_dev; struct device *parent; int minor; struct request_queue *queue; struct kref ref; void (*release)(struct device *); } ; 131 struct elevator_queue ; 133 struct request ; 135 struct bsg_job ; 136 struct blkcg_gq ; 137 struct blk_flush_queue ; 138 struct pr_ops ; 139 struct rq_wb ; 53 typedef void rq_end_io_fn(struct request *, int); 54 struct request_list { struct request_queue 
*q; struct blkcg_gq *blkg; int count[2U]; int starved[2U]; mempool_t *rq_pool; wait_queue_head_t wait[2U]; unsigned int flags; } ; 76 typedef __u32 req_flags_t; 77 union __anonunion____missing_field_name_378 { struct call_single_data csd; u64 fifo_time; } ; 77 struct blk_mq_ctx ; 77 union __anonunion____missing_field_name_379 { struct hlist_node hash; struct list_head ipi_list; } ; 77 union __anonunion____missing_field_name_380 { struct rb_node rb_node; struct bio_vec special_vec; void *completion_data; } ; 77 struct __anonstruct_elv_382 { struct io_cq *icq; void *priv[2U]; } ; 77 struct __anonstruct_flush_383 { unsigned int seq; struct list_head list; rq_end_io_fn *saved_end_io; } ; 77 union __anonunion____missing_field_name_381 { struct __anonstruct_elv_382 elv; struct __anonstruct_flush_383 flush; } ; 77 struct request { struct list_head queuelist; union __anonunion____missing_field_name_378 __annonCompField85; struct request_queue *q; struct blk_mq_ctx *mq_ctx; int cpu; unsigned int cmd_flags; req_flags_t rq_flags; int internal_tag; unsigned long atomic_flags; unsigned int __data_len; int tag; sector_t __sector; struct bio *bio; struct bio *biotail; union __anonunion____missing_field_name_379 __annonCompField86; union __anonunion____missing_field_name_380 __annonCompField87; union __anonunion____missing_field_name_381 __annonCompField88; struct gendisk *rq_disk; struct hd_struct *part; unsigned long start_time; struct blk_issue_stat issue_stat; struct request_list *rl; unsigned long long start_time_ns; unsigned long long io_start_time_ns; unsigned short nr_phys_segments; unsigned short nr_integrity_segments; unsigned short ioprio; void *special; int errors; unsigned int extra_len; unsigned long deadline; struct list_head timeout_list; unsigned int timeout; int retries; rq_end_io_fn *end_io; void *end_io_data; struct request *next_rq; } ; 117 struct elevator_type ; 118 enum elv_merge { ELEVATOR_NO_MERGE = 0, ELEVATOR_FRONT_MERGE = 1, ELEVATOR_BACK_MERGE = 2, ELEVATOR_DISCARD_MERGE = 3 } ; 22 typedef enum elv_merge elevator_merge_fn(struct request_queue *, struct request **, struct bio *); 25 typedef void elevator_merge_req_fn(struct request_queue *, struct request *, struct request *); 27 typedef void elevator_merged_fn(struct request_queue *, struct request *, enum elv_merge ); 29 typedef int elevator_allow_bio_merge_fn(struct request_queue *, struct request *, struct bio *); 32 typedef int elevator_allow_rq_merge_fn(struct request_queue *, struct request *, struct request *); 35 typedef void elevator_bio_merged_fn(struct request_queue *, struct request *, struct bio *); 38 typedef int elevator_dispatch_fn(struct request_queue *, int); 40 typedef void elevator_add_req_fn(struct request_queue *, struct request *); 41 typedef struct request * elevator_request_list_fn(struct request_queue *, struct request *); 42 typedef void elevator_completed_req_fn(struct request_queue *, struct request *); 43 typedef int elevator_may_queue_fn(struct request_queue *, unsigned int); 45 typedef void elevator_init_icq_fn(struct io_cq *); 46 typedef void elevator_exit_icq_fn(struct io_cq *); 47 typedef int elevator_set_req_fn(struct request_queue *, struct request *, struct bio *, gfp_t ); 49 typedef void elevator_put_req_fn(struct request *); 50 typedef void elevator_activate_req_fn(struct request_queue *, struct request *); 51 typedef void elevator_deactivate_req_fn(struct request_queue *, struct request *); 53 typedef int elevator_init_fn(struct request_queue *, struct elevator_type *); 55 typedef void 
elevator_exit_fn(struct elevator_queue *); 56 typedef void elevator_registered_fn(struct request_queue *); 57 struct elevator_ops { elevator_merge_fn *elevator_merge_fn; elevator_merged_fn *elevator_merged_fn; elevator_merge_req_fn *elevator_merge_req_fn; elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn; elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn; elevator_bio_merged_fn *elevator_bio_merged_fn; elevator_dispatch_fn *elevator_dispatch_fn; elevator_add_req_fn *elevator_add_req_fn; elevator_activate_req_fn *elevator_activate_req_fn; elevator_deactivate_req_fn *elevator_deactivate_req_fn; elevator_completed_req_fn *elevator_completed_req_fn; elevator_request_list_fn *elevator_former_req_fn; elevator_request_list_fn *elevator_latter_req_fn; elevator_init_icq_fn *elevator_init_icq_fn; elevator_exit_icq_fn *elevator_exit_icq_fn; elevator_set_req_fn *elevator_set_req_fn; elevator_put_req_fn *elevator_put_req_fn; elevator_may_queue_fn *elevator_may_queue_fn; elevator_init_fn *elevator_init_fn; elevator_exit_fn *elevator_exit_fn; elevator_registered_fn *elevator_registered_fn; } ; 89 struct blk_mq_alloc_data ; 90 struct blk_mq_hw_ctx ; 91 struct elevator_mq_ops { int (*init_sched)(struct request_queue *, struct elevator_type *); void (*exit_sched)(struct elevator_queue *); bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *); int (*request_merge)(struct request_queue *, struct request **, struct bio *); void (*request_merged)(struct request_queue *, struct request *, enum elv_merge ); void (*requests_merged)(struct request_queue *, struct request *, struct request *); struct request * (*get_request)(struct request_queue *, unsigned int, struct blk_mq_alloc_data *); void (*put_request)(struct request *); void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool ); struct request * (*dispatch_request)(struct blk_mq_hw_ctx *); bool (*has_work)(struct blk_mq_hw_ctx *); void (*completed_request)(struct blk_mq_hw_ctx *, struct request *); void (*started_request)(struct request *); void (*requeue_request)(struct request *); struct request * (*former_request)(struct request_queue *, struct request *); struct request * (*next_request)(struct request_queue *, struct request *); int (*get_rq_priv)(struct request_queue *, struct request *, struct bio *); void (*put_rq_priv)(struct request_queue *, struct request *); void (*init_icq)(struct io_cq *); void (*exit_icq)(struct io_cq *); } ; 117 struct elv_fs_entry { struct attribute attr; ssize_t (*show)(struct elevator_queue *, char *); ssize_t (*store)(struct elevator_queue *, const char *, size_t ); } ; 125 union __anonunion_ops_384 { struct elevator_ops sq; struct elevator_mq_ops mq; } ; 125 struct elevator_type { struct kmem_cache *icq_cache; union __anonunion_ops_384 ops; size_t icq_size; size_t icq_align; struct elv_fs_entry *elevator_attrs; char elevator_name[16U]; struct module *elevator_owner; bool uses_mq; char icq_cache_name[21U]; struct list_head list; } ; 157 struct elevator_queue { struct elevator_type *type; void *elevator_data; struct kobject kobj; struct mutex sysfs_lock; unsigned char registered; unsigned char uses_mq; struct hlist_head hash[64U]; } ; 261 typedef void request_fn_proc(struct request_queue *); 262 typedef blk_qc_t make_request_fn(struct request_queue *, struct bio *); 263 typedef int prep_rq_fn(struct request_queue *, struct request *); 264 typedef void unprep_rq_fn(struct request_queue *, struct request *); 267 
typedef void softirq_done_fn(struct request *); 268 typedef int dma_drain_needed_fn(struct request *); 269 typedef int lld_busy_fn(struct request_queue *); 270 typedef int bsg_job_fn(struct bsg_job *); 271 typedef int init_rq_fn(struct request_queue *, struct request *, gfp_t ); 272 typedef void exit_rq_fn(struct request_queue *, struct request *); 273 enum blk_eh_timer_return { BLK_EH_NOT_HANDLED = 0, BLK_EH_HANDLED = 1, BLK_EH_RESET_TIMER = 2 } ; 280 typedef enum blk_eh_timer_return rq_timed_out_fn(struct request *); 286 struct blk_queue_tag { struct request **tag_index; unsigned long *tag_map; int max_depth; int real_max_depth; atomic_t refcnt; int alloc_policy; int next_tag; } ; 296 enum blk_zoned_model { BLK_ZONED_NONE = 0, BLK_ZONED_HA = 1, BLK_ZONED_HM = 2 } ; 302 struct queue_limits { unsigned long bounce_pfn; unsigned long seg_boundary_mask; unsigned long virt_boundary_mask; unsigned int max_hw_sectors; unsigned int max_dev_sectors; unsigned int chunk_sectors; unsigned int max_sectors; unsigned int max_segment_size; unsigned int physical_block_size; unsigned int alignment_offset; unsigned int io_min; unsigned int io_opt; unsigned int max_discard_sectors; unsigned int max_hw_discard_sectors; unsigned int max_write_same_sectors; unsigned int max_write_zeroes_sectors; unsigned int discard_granularity; unsigned int discard_alignment; unsigned short logical_block_size; unsigned short max_segments; unsigned short max_integrity_segments; unsigned short max_discard_segments; unsigned char misaligned; unsigned char discard_misaligned; unsigned char cluster; unsigned char discard_zeroes_data; unsigned char raid_partial_stripes_expensive; enum blk_zoned_model zoned; } ; 361 struct blk_mq_ops ; 361 struct throtl_data ; 361 struct blk_mq_tag_set ; 361 struct request_queue { struct list_head queue_head; struct request *last_merge; struct elevator_queue *elevator; int nr_rqs[2U]; int nr_rqs_elvpriv; struct rq_wb *rq_wb; struct request_list root_rl; request_fn_proc *request_fn; make_request_fn *make_request_fn; prep_rq_fn *prep_rq_fn; unprep_rq_fn *unprep_rq_fn; softirq_done_fn *softirq_done_fn; rq_timed_out_fn *rq_timed_out_fn; dma_drain_needed_fn *dma_drain_needed; lld_busy_fn *lld_busy_fn; init_rq_fn *init_rq_fn; exit_rq_fn *exit_rq_fn; const struct blk_mq_ops *mq_ops; unsigned int *mq_map; struct blk_mq_ctx *queue_ctx; unsigned int nr_queues; unsigned int queue_depth; struct blk_mq_hw_ctx **queue_hw_ctx; unsigned int nr_hw_queues; sector_t end_sector; struct request *boundary_rq; struct delayed_work delay_work; struct backing_dev_info *backing_dev_info; struct disk_devt *disk_devt; void *queuedata; unsigned long queue_flags; int id; gfp_t bounce_gfp; spinlock_t __queue_lock; spinlock_t *queue_lock; struct kobject kobj; struct kobject mq_kobj; struct blk_integrity integrity; struct device *dev; int rpm_status; unsigned int nr_pending; unsigned long nr_requests; unsigned int nr_congestion_on; unsigned int nr_congestion_off; unsigned int nr_batching; unsigned int dma_drain_size; void *dma_drain_buffer; unsigned int dma_pad_mask; unsigned int dma_alignment; struct blk_queue_tag *queue_tags; struct list_head tag_busy_list; unsigned int nr_sorted; unsigned int in_flight[2U]; struct blk_rq_stat rq_stats[2U]; unsigned int request_fn_active; unsigned int rq_timeout; int poll_nsec; struct timer_list timeout; struct work_struct timeout_work; struct list_head timeout_list; struct list_head icq_list; unsigned long blkcg_pols[1U]; struct blkcg_gq *root_blkg; struct list_head blkg_list; struct queue_limits 
limits; unsigned int sg_timeout; unsigned int sg_reserved_size; int node; struct blk_flush_queue *fq; struct list_head requeue_list; spinlock_t requeue_lock; struct delayed_work requeue_work; struct mutex sysfs_lock; int bypass_depth; atomic_t mq_freeze_depth; bsg_job_fn *bsg_job_fn; int bsg_job_size; struct bsg_class_device bsg_dev; struct throtl_data *td; struct callback_head callback_head; wait_queue_head_t mq_freeze_wq; struct percpu_ref q_usage_counter; struct list_head all_q_node; struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; struct bio_set *bio_split; struct dentry *debugfs_dir; struct dentry *mq_debugfs_dir; bool mq_sysfs_init_done; size_t cmd_size; void *rq_alloc_data; } ; 1241 struct blk_plug { struct list_head list; struct list_head mq_list; struct list_head cb_list; } ; 1744 struct blk_integrity_iter { void *prot_buf; void *data_buf; sector_t seed; unsigned int data_size; unsigned short interval; const char *disk_name; } ; 1773 typedef int integrity_processing_fn(struct blk_integrity_iter *); 1774 struct blk_integrity_profile { integrity_processing_fn *generate_fn; integrity_processing_fn *verify_fn; const char *name; } ; 1933 struct block_device_operations { int (*open)(struct block_device *, fmode_t ); void (*release)(struct gendisk *, fmode_t ); int (*rw_page)(struct block_device *, sector_t , struct page *, bool ); int (*ioctl)(struct block_device *, fmode_t , unsigned int, unsigned long); int (*compat_ioctl)(struct block_device *, fmode_t , unsigned int, unsigned long); long int (*direct_access)(struct block_device *, sector_t , void **, pfn_t *, long); unsigned int (*check_events)(struct gendisk *, unsigned int); int (*media_changed)(struct gendisk *); void (*unlock_native_capacity)(struct gendisk *); int (*revalidate_disk)(struct gendisk *); int (*getgeo)(struct block_device *, struct hd_geometry *); void (*swap_slot_free_notify)(struct block_device *, unsigned long); struct module *owner; const struct pr_ops *pr_ops; } ; 68 struct scsi_cmnd ; 25 enum scsi_device_state { SDEV_CREATED = 1, SDEV_RUNNING = 2, SDEV_CANCEL = 3, SDEV_DEL = 4, SDEV_QUIESCE = 5, SDEV_OFFLINE = 6, SDEV_TRANSPORT_OFFLINE = 7, SDEV_BLOCK = 8, SDEV_CREATED_BLOCK = 9 } ; 78 struct Scsi_Host ; 78 struct scsi_target ; 78 struct scsi_device_handler ; 78 struct scsi_device { struct Scsi_Host *host; struct request_queue *request_queue; struct list_head siblings; struct list_head same_target_siblings; atomic_t device_busy; atomic_t device_blocked; spinlock_t list_lock; struct list_head cmd_list; struct list_head starved_entry; unsigned short queue_depth; unsigned short max_queue_depth; unsigned short last_queue_full_depth; unsigned short last_queue_full_count; unsigned long last_queue_full_time; unsigned long queue_ramp_up_period; unsigned long last_queue_ramp_up; unsigned int id; unsigned int channel; u64 lun; unsigned int manufacturer; unsigned int sector_size; void *hostdata; char type; char scsi_level; char inq_periph_qual; struct mutex inquiry_mutex; unsigned char inquiry_len; unsigned char *inquiry; const char *vendor; const char *model; const char *rev; int vpd_pg83_len; unsigned char *vpd_pg83; int vpd_pg80_len; unsigned char *vpd_pg80; unsigned char current_tag; struct scsi_target *sdev_target; unsigned int sdev_bflags; unsigned int eh_timeout; unsigned char removable; unsigned char changed; unsigned char busy; unsigned char lockable; unsigned char locked; unsigned char borken; unsigned char disconnect; unsigned char soft_reset; unsigned char sdtr; unsigned char wdtr; unsigned 
char ppr; unsigned char tagged_supported; unsigned char simple_tags; unsigned char was_reset; unsigned char expecting_cc_ua; unsigned char use_10_for_rw; unsigned char use_10_for_ms; unsigned char no_report_opcodes; unsigned char no_write_same; unsigned char use_16_for_rw; unsigned char skip_ms_page_8; unsigned char skip_ms_page_3f; unsigned char skip_vpd_pages; unsigned char try_vpd_pages; unsigned char use_192_bytes_for_3f; unsigned char no_start_on_add; unsigned char allow_restart; unsigned char manage_start_stop; unsigned char start_stop_pwr_cond; unsigned char no_uld_attach; unsigned char select_no_atn; unsigned char fix_capacity; unsigned char guess_capacity; unsigned char retry_hwerror; unsigned char last_sector_bug; unsigned char no_read_disc_info; unsigned char no_read_capacity_16; unsigned char try_rc_10_first; unsigned char is_visible; unsigned char wce_default_on; unsigned char no_dif; unsigned char broken_fua; unsigned char lun_in_cdb; unsigned char synchronous_alua; atomic_t disk_events_disable_depth; unsigned long supported_events[1U]; unsigned long pending_events[1U]; struct list_head event_list; struct work_struct event_work; unsigned int max_device_blocked; atomic_t iorequest_cnt; atomic_t iodone_cnt; atomic_t ioerr_cnt; struct device sdev_gendev; struct device sdev_dev; struct execute_work ew; struct work_struct requeue_work; struct scsi_device_handler *handler; void *handler_data; unsigned char access_state; enum scsi_device_state sdev_state; unsigned long sdev_data[0U]; } ; 237 enum scsi_target_state { STARGET_CREATED = 1, STARGET_RUNNING = 2, STARGET_REMOVE = 3, STARGET_DEL = 4 } ; 244 struct scsi_target { struct scsi_device *starget_sdev_user; struct list_head siblings; struct list_head devices; struct device dev; struct kref reap_ref; unsigned int channel; unsigned int id; unsigned char create; unsigned char single_lun; unsigned char pdt_1f_for_no_lun; unsigned char no_report_luns; unsigned char expecting_lun_change; atomic_t target_busy; atomic_t target_blocked; unsigned int can_queue; unsigned int max_target_blocked; char scsi_level; enum scsi_target_state state; void *hostdata; unsigned long starget_data[0U]; } ; 548 struct sbitmap_word { unsigned long word; unsigned long depth; } ; 40 struct sbitmap { unsigned int depth; unsigned int shift; unsigned int map_nr; struct sbitmap_word *map; } ; 402 struct blk_mq_tags ; 403 struct __anonstruct____missing_field_name_386 { spinlock_t lock; struct list_head dispatch; unsigned long state; } ; 403 struct blk_mq_hw_ctx { struct __anonstruct____missing_field_name_386 __annonCompField89; struct work_struct run_work; cpumask_var_t cpumask; int next_cpu; int next_cpu_batch; unsigned long flags; void *sched_data; struct request_queue *queue; struct blk_flush_queue *fq; void *driver_data; struct sbitmap ctx_map; struct blk_mq_ctx **ctxs; unsigned int nr_ctx; wait_queue_t dispatch_wait; atomic_t wait_index; struct blk_mq_tags *tags; struct blk_mq_tags *sched_tags; struct srcu_struct queue_rq_srcu; unsigned long queued; unsigned long run; unsigned long dispatched[7U]; unsigned int numa_node; unsigned int queue_num; atomic_t nr_active; struct delayed_work delay_work; struct hlist_node cpuhp_dead; struct kobject kobj; unsigned long poll_considered; unsigned long poll_invoked; unsigned long poll_success; } ; 63 struct blk_mq_tag_set { unsigned int *mq_map; const struct blk_mq_ops *ops; unsigned int nr_hw_queues; unsigned int queue_depth; unsigned int reserved_tags; unsigned int cmd_size; int numa_node; unsigned int timeout; unsigned 
int flags; void *driver_data; struct blk_mq_tags **tags; struct mutex tag_list_lock; struct list_head tag_list; } ; 81 struct blk_mq_queue_data { struct request *rq; struct list_head *list; bool last; } ; 88 typedef int queue_rq_fn(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); 89 typedef enum blk_eh_timer_return timeout_fn(struct request *, bool ); 90 typedef int init_hctx_fn(struct blk_mq_hw_ctx *, void *, unsigned int); 91 typedef void exit_hctx_fn(struct blk_mq_hw_ctx *, unsigned int); 92 typedef int init_request_fn(void *, struct request *, unsigned int, unsigned int, unsigned int); 94 typedef void exit_request_fn(void *, struct request *, unsigned int, unsigned int); 96 typedef int reinit_request_fn(void *, struct request *); 101 typedef int poll_fn(struct blk_mq_hw_ctx *, unsigned int); 102 typedef int map_queues_fn(struct blk_mq_tag_set *); 103 struct blk_mq_ops { queue_rq_fn *queue_rq; timeout_fn *timeout; poll_fn *poll; softirq_done_fn *complete; init_hctx_fn *init_hctx; exit_hctx_fn *exit_hctx; init_request_fn *init_request; exit_request_fn *exit_request; reinit_request_fn *reinit_request; map_queues_fn *map_queues; } ; 268 struct scsi_request { unsigned char __cmd[16U]; unsigned char *cmd; unsigned short cmd_len; unsigned int sense_len; unsigned int resid_len; void *sense; } ; 30 struct scsi_data_buffer { struct sg_table table; unsigned int length; int resid; } ; 40 struct scsi_pointer { char *ptr; int this_residual; struct scatterlist *buffer; int buffers_residual; dma_addr_t dma_handle; volatile int Status; volatile int Message; volatile int have_data_in; volatile int sent_command; volatile int phase; } ; 56 struct scsi_cmnd { struct scsi_request req; struct scsi_device *device; struct list_head list; struct list_head eh_entry; struct delayed_work abort_work; int eh_eflags; unsigned long serial_number; unsigned long jiffies_at_alloc; int retries; int allowed; unsigned char prot_op; unsigned char prot_type; unsigned char prot_flags; unsigned short cmd_len; enum dma_data_direction sc_data_direction; unsigned char *cmnd; struct scsi_data_buffer sdb; struct scsi_data_buffer *prot_sdb; unsigned int underflow; unsigned int transfersize; struct request *request; unsigned char *sense_buffer; void (*scsi_done)(struct scsi_cmnd *); struct scsi_pointer SCp; unsigned char *host_scribble; int result; int flags; unsigned char tag; } ; 353 struct scsi_host_cmd_pool ; 354 struct scsi_transport_template ; 356 struct scsi_host_template { struct module *module; const char *name; int (*detect)(struct scsi_host_template *); int (*release)(struct Scsi_Host *); const char * (*info)(struct Scsi_Host *); int (*ioctl)(struct scsi_device *, int, void *); int (*compat_ioctl)(struct scsi_device *, int, void *); int (*queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); int (*eh_abort_handler)(struct scsi_cmnd *); int (*eh_device_reset_handler)(struct scsi_cmnd *); int (*eh_target_reset_handler)(struct scsi_cmnd *); int (*eh_bus_reset_handler)(struct scsi_cmnd *); int (*eh_host_reset_handler)(struct scsi_cmnd *); int (*slave_alloc)(struct scsi_device *); int (*slave_configure)(struct scsi_device *); void (*slave_destroy)(struct scsi_device *); int (*target_alloc)(struct scsi_target *); void (*target_destroy)(struct scsi_target *); int (*scan_finished)(struct Scsi_Host *, unsigned long); void (*scan_start)(struct Scsi_Host *); int (*change_queue_depth)(struct scsi_device *, int); int (*map_queues)(struct Scsi_Host *); int (*bios_param)(struct scsi_device *, struct block_device *, sector_t , 
int *); void (*unlock_native_capacity)(struct scsi_device *); int (*show_info)(struct seq_file *, struct Scsi_Host *); int (*write_info)(struct Scsi_Host *, char *, int); enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *); int (*host_reset)(struct Scsi_Host *, int); const char *proc_name; struct proc_dir_entry *proc_dir; int can_queue; int this_id; unsigned short sg_tablesize; unsigned short sg_prot_tablesize; unsigned int max_sectors; unsigned long dma_boundary; short cmd_per_lun; unsigned char present; int tag_alloc_policy; unsigned char track_queue_depth; unsigned char supported_mode; unsigned char unchecked_isa_dma; unsigned char use_clustering; unsigned char emulated; unsigned char skip_settle_delay; unsigned char no_write_same; unsigned char no_async_abort; unsigned int max_host_blocked; struct device_attribute **shost_attrs; struct device_attribute **sdev_attrs; struct list_head legacy_hosts; u64 vendor_id; unsigned int cmd_size; struct scsi_host_cmd_pool *cmd_pool; } ; 507 enum scsi_host_state { SHOST_CREATED = 1, SHOST_RUNNING = 2, SHOST_CANCEL = 3, SHOST_DEL = 4, SHOST_RECOVERY = 5, SHOST_CANCEL_RECOVERY = 6, SHOST_DEL_RECOVERY = 7 } ; 517 union __anonunion____missing_field_name_387 { struct blk_queue_tag *bqt; struct blk_mq_tag_set tag_set; } ; 517 struct Scsi_Host { struct list_head __devices; struct list_head __targets; struct list_head starved_list; spinlock_t default_lock; spinlock_t *host_lock; struct mutex scan_mutex; struct list_head eh_cmd_q; struct task_struct *ehandler; struct completion *eh_action; wait_queue_head_t host_wait; struct scsi_host_template *hostt; struct scsi_transport_template *transportt; union __anonunion____missing_field_name_387 __annonCompField90; atomic_t host_busy; atomic_t host_blocked; unsigned int host_failed; unsigned int host_eh_scheduled; unsigned int host_no; int eh_deadline; unsigned long last_reset; unsigned int max_channel; unsigned int max_id; u64 max_lun; unsigned int unique_id; unsigned short max_cmd_len; int this_id; int can_queue; short cmd_per_lun; unsigned short sg_tablesize; unsigned short sg_prot_tablesize; unsigned int max_sectors; unsigned long dma_boundary; unsigned int nr_hw_queues; unsigned long cmd_serial_number; unsigned char active_mode; unsigned char unchecked_isa_dma; unsigned char use_clustering; unsigned char host_self_blocked; unsigned char reverse_ordering; unsigned char tmf_in_progress; unsigned char async_scan; unsigned char eh_noresume; unsigned char no_write_same; unsigned char use_blk_mq; unsigned char use_cmd_list; unsigned char short_inquiry; char work_q_name[20U]; struct workqueue_struct *work_q; struct workqueue_struct *tmf_work_q; unsigned char no_scsi2_lun_in_cdb; unsigned int max_host_blocked; unsigned int prot_capabilities; unsigned char prot_guard_type; struct request_queue *uspace_req_q; unsigned long base; unsigned long io_port; unsigned char n_io_port; unsigned char dma_channel; unsigned int irq; enum scsi_host_state shost_state; struct device shost_gendev; struct device shost_dev; struct list_head sht_legacy_list; void *shost_data; struct device *dma_dev; unsigned long hostdata[0U]; } ; 931 struct attribute_container { struct list_head node; struct klist containers; struct class *class; const struct attribute_group *grp; struct device_attribute **attrs; int (*match)(struct attribute_container *, struct device *); unsigned long flags; } ; 71 struct transport_container ; 43 struct transport_container { struct attribute_container ac; const struct attribute_group *statistics; } ; 100 struct 
scsi_transport_template { struct transport_container host_attrs; struct transport_container target_attrs; struct transport_container device_attrs; int (*user_scan)(struct Scsi_Host *, uint , uint , u64 ); int device_size; int device_private_offset; int target_size; int target_private_offset; int host_size; unsigned char create_work_queue; void (*eh_strategy_handler)(struct Scsi_Host *); } ; 200 enum mvumi_qc_result { MV_QUEUE_COMMAND_RESULT_SENT = 0, MV_QUEUE_COMMAND_RESULT_NO_RESOURCE = 1 } ; 205 struct mvumi_hw_regs { void *main_int_cause_reg; void *enpointa_mask_reg; void *enpointb_mask_reg; void *rstoutn_en_reg; void *ctrl_sts_reg; void *rstoutn_mask_reg; void *sys_soft_rst_reg; void *pciea_to_arm_drbl_reg; void *arm_to_pciea_drbl_reg; void *arm_to_pciea_mask_reg; void *pciea_to_arm_msg0; void *pciea_to_arm_msg1; void *arm_to_pciea_msg0; void *arm_to_pciea_msg1; void *reset_request; void *reset_enable; void *inb_list_basel; void *inb_list_baseh; void *inb_aval_count_basel; void *inb_aval_count_baseh; void *inb_write_pointer; void *inb_read_pointer; void *outb_list_basel; void *outb_list_baseh; void *outb_copy_basel; void *outb_copy_baseh; void *outb_copy_pointer; void *outb_read_pointer; void *inb_isr_cause; void *outb_isr_cause; void *outb_coal_cfg; void *outb_coal_timeout; u32 int_comaout; u32 int_comaerr; u32 int_dl_cpu2pciea; u32 int_mu; u32 int_drbl_int_mask; u32 int_main_int_mask; u32 cl_pointer_toggle; u32 cl_slot_num_mask; u32 clic_irq; u32 clic_in_err; u32 clic_out_err; } ; 104 struct mvumi_dyn_list_entry { u32 src_low_addr; u32 src_high_addr; u32 if_length; u32 reserve; } ; 126 struct mvumi_hotplug_event { u16 size; u8 dummy[2U]; u8 bitmap[0U]; } ; 151 struct mvumi_driver_event { u32 time_stamp; u32 sequence_no; u32 event_id; u8 severity; u8 param_count; u16 device_id; u32 params[4U]; u8 sense_data_length; u8 Reserved1; u8 sense_data[30U]; } ; 164 struct mvumi_event_req { unsigned char count; unsigned char reserved[3U]; struct mvumi_driver_event events[6U]; } ; 170 struct mvumi_hba ; 170 struct mvumi_events_wq { struct work_struct work_q; struct mvumi_hba *mhba; unsigned int event; void *param; } ; 177 struct mvumi_sgl { u32 baseaddr_l; u32 baseaddr_h; u32 flags; u32 size; } ; 193 struct mvumi_compact_sgl { u32 baseaddr_l; u32 baseaddr_h; u32 flags; } ; 198 struct mvumi_res { struct list_head entry; dma_addr_t bus_addr; void *virt_addr; unsigned int size; unsigned short type; } ; 234 enum resource_type { RESOURCE_CACHED_MEMORY = 0, RESOURCE_UNCACHED_MEMORY = 1 } ; 258 struct mvumi_msg_frame ; 258 struct mvumi_cmd { struct list_head queue_pointer; struct mvumi_msg_frame *frame; dma_addr_t frame_phys; struct scsi_cmnd *scmd; atomic_t sync_cmd; void *data_buf; unsigned short request_id; unsigned char cmd_status; } ; 272 struct mvumi_msg_frame { u16 device_id; u16 tag; u8 cmd_flag; u8 req_function; u8 cdb_length; u8 sg_counts; u32 data_transfer_length; u16 request_id; u16 reserved1; u8 cdb[16U]; u32 payload[1U]; } ; 291 struct mvumi_rsp_frame { u16 device_id; u16 tag; u8 req_status; u8 rsp_flag; u16 request_id; u32 payload[1U]; } ; 306 struct mvumi_ob_data { struct list_head list; unsigned char data[0U]; } ; 311 struct version_info { u32 ver_major; u32 ver_minor; u32 ver_oem; u32 ver_build; } ; 385 struct mvumi_hs_header { u8 page_code; u8 checksum; u16 frame_length; u32 frame_content[1U]; } ; 392 struct mvumi_hs_page1 { u8 pagecode; u8 checksum; u16 frame_length; u16 number_of_ports; u16 max_devices_support; u16 max_io_support; u16 umi_ver; u32 max_transfer_size; struct 
version_info fw_ver; u8 cl_in_max_entry_size; u8 cl_out_max_entry_size; u8 cl_inout_list_depth; u8 total_pages; u16 capability; u16 reserved1; } ; 431 struct mvumi_hs_page2 { u8 pagecode; u8 checksum; u16 frame_length; u8 host_type; u8 host_cap; u8 reserved[2U]; struct version_info host_ver; u32 system_io_bus; u32 slot_number; u32 intr_level; u32 intr_vector; u64 seconds_since1970; } ; 448 struct mvumi_hs_page3 { u8 pagecode; u8 checksum; u16 frame_length; u16 control; u8 reserved[2U]; u32 host_bufferaddr_l; u32 host_bufferaddr_h; u32 host_eventaddr_l; u32 host_eventaddr_h; } ; 461 struct mvumi_hs_page4 { u8 pagecode; u8 checksum; u16 frame_length; u32 ib_baseaddr_l; u32 ib_baseaddr_h; u32 ob_baseaddr_l; u32 ob_baseaddr_h; u8 ib_entry_size; u8 ob_entry_size; u8 ob_depth; u8 ib_depth; } ; 475 struct mvumi_tag { unsigned short *stack; unsigned short top; unsigned short size; } ; 481 struct mvumi_device { struct list_head list; struct scsi_device *sdev; u64 wwid; u8 dev_type; int id; } ; 489 struct mvumi_instance_template ; 489 struct mvumi_hba { void *base_addr[6U]; u32 pci_base[6U]; void *mmio; struct list_head cmd_pool; struct Scsi_Host *shost; wait_queue_head_t int_cmd_wait_q; struct pci_dev *pdev; unsigned int unique_id; atomic_t fw_outstanding; struct mvumi_instance_template *instancet; void *ib_list; dma_addr_t ib_list_phys; void *ib_frame; dma_addr_t ib_frame_phys; void *ob_list; dma_addr_t ob_list_phys; void *ib_shadow; dma_addr_t ib_shadow_phys; void *ob_shadow; dma_addr_t ob_shadow_phys; void *handshake_page; dma_addr_t handshake_page_phys; unsigned int global_isr; unsigned int isr_status; unsigned short max_sge; unsigned short max_target_id; unsigned char *target_map; unsigned int max_io; unsigned int list_num_io; unsigned int ib_max_size; unsigned int ob_max_size; unsigned int ib_max_size_setting; unsigned int ob_max_size_setting; unsigned int max_transfer_size; unsigned char hba_total_pages; unsigned char fw_flag; unsigned char request_id_enabled; unsigned char eot_flag; unsigned short hba_capability; unsigned short io_seq; unsigned int ib_cur_slot; unsigned int ob_cur_slot; unsigned int fw_state; struct mutex sas_discovery_mutex; struct list_head ob_data_list; struct list_head free_ob_list; struct list_head res_list; struct list_head waiting_req_list; struct mvumi_tag tag_pool; struct mvumi_cmd **tag_cmd; struct mvumi_hw_regs *regs; struct mutex device_lock; struct list_head mhba_dev_list; struct list_head shost_dev_list; struct task_struct *dm_thread; atomic_t pnp_count; } ; 559 struct mvumi_instance_template { void (*fire_cmd)(struct mvumi_hba *, struct mvumi_cmd *); void (*enable_intr)(struct mvumi_hba *); void (*disable_intr)(struct mvumi_hba *); int (*clear_intr)(void *); unsigned int (*read_fw_status_reg)(struct mvumi_hba *); unsigned int (*check_ib_list)(struct mvumi_hba *); int (*check_ob_list)(struct mvumi_hba *, unsigned int *, unsigned int *); int (*reset_host)(struct mvumi_hba *); } ; 1 void * __builtin_memcpy(void *, const void *, unsigned long); 1 long int __builtin_expect(long, long); 252 void __read_once_size(const volatile void *p, void *res, int size); 277 void __write_once_size(volatile void *p, void *res, int size); 34 extern struct module __this_module; 28 unsigned long int find_next_zero_bit(const unsigned long *, unsigned long, unsigned long); 42 unsigned long int find_first_bit(const unsigned long *, unsigned long); 172 int printk(const char *, ...); 63 void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...); 191 void 
__might_sleep(const char *, int, int); 3 bool ldv_is_err(const void *ptr); 8 void ldv_dma_map_page(); 25 void INIT_LIST_HEAD(struct list_head *list); 32 bool __list_add_valid(struct list_head *, struct list_head *, struct list_head *); 35 bool __list_del_entry_valid(struct list_head *); 55 void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next); 76 void list_add(struct list_head *new, struct list_head *head); 90 void list_add_tail(struct list_head *new, struct list_head *head); 102 void __list_del(struct list_head *prev, struct list_head *next); 114 void __list_del_entry(struct list_head *entry); 122 void list_del(struct list_head *entry); 156 void list_del_init(struct list_head *entry); 178 void list_move_tail(struct list_head *list, struct list_head *head); 200 int list_empty(const struct list_head *head); 87 void __bad_percpu_size(); 10 extern struct task_struct *current_task; 12 struct task_struct * get_current(); 7 extern unsigned long page_offset_base; 9 extern unsigned long vmemmap_base; 23 unsigned long int __phys_addr(unsigned long); 12 void __xchg_wrong_size(); 24 int atomic_read(const atomic_t *v); 36 void atomic_set(atomic_t *v, int i); 89 void atomic_inc(atomic_t *v); 101 void atomic_dec(atomic_t *v); 32 void * __memcpy(void *, const void *, size_t ); 57 void * __memset(void *, int, size_t ); 41 bool IS_ERR(const void *ptr); 281 void lockdep_init_map(struct lockdep_map *, const char *, struct lock_class_key *, int); 32 unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *); 43 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long); 286 raw_spinlock_t * spinlock_check(spinlock_t *lock); 352 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags); 9 extern struct timezone sys_tz; 73 void __init_waitqueue_head(wait_queue_head_t *, const char *, struct lock_class_key *); 201 void __wake_up(wait_queue_head_t *, unsigned int, int, void *); 252 void init_wait_entry(wait_queue_t *, int); 983 long int prepare_to_wait_event(wait_queue_head_t *, wait_queue_t *, int); 984 void finish_wait(wait_queue_head_t *, wait_queue_t *); 133 void __mutex_init(struct mutex *, const char *, struct lock_class_key *); 155 void mutex_lock_nested(struct mutex *, unsigned int); 195 void mutex_unlock(struct mutex *); 78 extern volatile unsigned long jiffies; 59 time64_t ktime_get_real_seconds(); 193 void __init_work(struct work_struct *, int); 365 extern struct workqueue_struct *system_wq; 443 bool queue_work_on(int, struct workqueue_struct *, struct work_struct *); 484 bool queue_work(struct workqueue_struct *wq, struct work_struct *work); 543 bool schedule_work(struct work_struct *work); 31 unsigned int ioread32(void *); 41 void iowrite32(u32 , void *); 84 void pci_iounmap(struct pci_dev *, void *); 17 void * pci_iomap(struct pci_dev *, int, unsigned long); 175 long int schedule_timeout(long); 180 void schedule(); 1386 int wake_up_process(struct task_struct *); 1026 void * dev_get_drvdata(const struct device *dev); 1031 void dev_set_drvdata(struct device *dev, void *data); 1261 void dev_err(const struct device *, const char *, ...); 1263 void dev_warn(const struct device *, const char *, ...); 154 void kfree(const void *); 330 void * __kmalloc(size_t , gfp_t ); 478 void * kmalloc(size_t size, gfp_t flags); 661 void * kzalloc(size_t size, gfp_t flags); 923 int pci_bus_read_config_dword(struct pci_bus *, unsigned int, int, u32 *); 929 int pci_bus_write_config_dword(struct pci_bus *, unsigned int, int, u32 ); 951 int pci_read_config_dword(const struct 
pci_dev *dev, int where, u32 *val); 964 int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val); 1011 int pci_enable_device(struct pci_dev *); 1028 void pci_disable_device(struct pci_dev *); 1031 void pci_set_master(struct pci_dev *); 1084 int pci_save_state(struct pci_dev *); 1085 void pci_restore_state(struct pci_dev *); 1098 int pci_set_power_state(struct pci_dev *, pci_power_t ); 1099 pci_power_t pci_choose_state(struct pci_dev *, pm_message_t ); 1102 int __pci_enable_wake(struct pci_dev *, pci_power_t , bool , bool ); 1113 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable); 1157 int pci_request_regions(struct pci_dev *, const char *); 1159 void pci_release_regions(struct pci_dev *); 1212 int __pci_register_driver(struct pci_driver *, struct module *, const char *); 1221 void pci_unregister_driver(struct pci_driver *); 1020 void * lowmem_page_address(const struct page *page); 120 struct page * sg_page(struct scatterlist *sg); 239 void * sg_virt(struct scatterlist *sg); 246 struct scatterlist * sg_next(struct scatterlist *); 1650 void * pci_get_drvdata(struct pci_dev *pdev); 1655 void pci_set_drvdata(struct pci_dev *pdev, void *data); 37 void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool ); 44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool ); 47 void debug_dma_map_sg(struct device *, struct scatterlist *, int, int, int); 50 void debug_dma_unmap_sg(struct device *, struct scatterlist *, int, int); 53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *); 131 void kmemcheck_mark_initialized(void *address, unsigned int n); 144 int valid_dma_direction(int dma_direction); 28 extern const struct dma_map_ops *dma_ops; 30 const struct dma_map_ops * get_arch_dma_ops(struct bus_type *bus); 35 bool arch_dma_alloc_attrs(struct device **, gfp_t *); 39 int dma_supported(struct device *, u64 ); 175 const struct dma_map_ops * get_dma_ops(struct device *dev); 200 dma_addr_t ldv_dma_map_single_attrs_5(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs); 200 dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs); 223 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs); 240 int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); 258 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); 476 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs); 517 void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); 523 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle); 575 int dma_set_mask(struct device *dev, u64 mask); 680 void * dma_zalloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); 16 void * pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle); 23 void * pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle); 31 void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); 38 dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction); 44 void 
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction); 65 int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction); 72 void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction); 113 int pci_set_dma_mask(struct pci_dev *dev, u64 mask); 139 int request_threaded_irq(unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *); 144 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *), unsigned long flags, const char *name, void *dev); 158 void free_irq(unsigned int, void *); 10 void __const_udelay(unsigned long); 57 void msleep(unsigned int); 59 void usleep_range(unsigned long, unsigned long); 64 void scsi_build_sense_buffer(int, u8 *, u8 , u8 , u8 ); 236 void scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...); 311 int scsi_add_device(struct Scsi_Host *, uint , uint , u64 ); 314 void scsi_remove_device(struct scsi_device *); 320 void scsi_device_put(struct scsi_device *); 321 struct scsi_device * scsi_device_lookup(struct Scsi_Host *, uint , uint , u64 ); 173 unsigned int scsi_sg_count(struct scsi_cmnd *cmd); 178 struct scatterlist * scsi_sglist(struct scsi_cmnd *cmd); 183 unsigned int scsi_bufflen(struct scsi_cmnd *cmd); 754 void * shost_priv(struct Scsi_Host *shost); 787 struct Scsi_Host * scsi_host_alloc(struct scsi_host_template *, int); 788 int scsi_add_host_with_dma(struct Scsi_Host *, struct device *, struct device *); 792 void scsi_rescan_device(struct device *); 793 void scsi_remove_host(struct Scsi_Host *); 795 void scsi_host_put(struct Scsi_Host *); 798 void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *); 800 int scsi_add_host(struct Scsi_Host *host, struct device *dev); 8 struct task_struct * kthread_create_on_node(int (*)(void *), void *, int, const char *, ...); 54 int kthread_stop(struct task_struct *); 55 bool kthread_should_stop(); 53 const struct pci_device_id mvumi_pci_table[3U] = { { 6987U, 37187U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 6987U, 38272U, 4294967295U, 4294967295U, 0U, 0U, 0UL }, { 0U, 0U, 0U, 0U, 0U, 0U, 0UL } }; 59 const struct pci_device_id __mod_pci__mvumi_pci_table_device_table[3U] = { }; 61 void tag_init(struct mvumi_tag *st, unsigned short size); 70 unsigned short int tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st); 76 void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st, unsigned short tag); 83 bool tag_is_empty(struct mvumi_tag *st); 91 void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array); 101 int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array); 123 struct mvumi_res * mvumi_alloc_mem_resource(struct mvumi_hba *mhba, enum resource_type type, unsigned int size); 172 void mvumi_release_mem_resource(struct mvumi_hba *mhba); 205 int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd, void *sgl_p, unsigned char *sg_count); 252 int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, unsigned int size); 278 struct mvumi_cmd * mvumi_create_internal_cmd(struct mvumi_hba *mhba, unsigned int buf_size); 314 void mvumi_delete_internal_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd); 344 struct mvumi_cmd * mvumi_get_cmd(struct mvumi_hba *mhba); 363 void mvumi_return_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd); 374 void mvumi_free_cmds(struct mvumi_hba *mhba); 393 int mvumi_alloc_cmds(struct mvumi_hba *mhba); 430 unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba); 452 
unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba); 463 void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry); 484 void mvumi_send_ib_list_entry(struct mvumi_hba *mhba); 490 char mvumi_check_ob_frame(struct mvumi_hba *mhba, unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame); 517 int mvumi_check_ob_list_9143(struct mvumi_hba *mhba, unsigned int *cur_obf, unsigned int *assign_obf_end); 538 int mvumi_check_ob_list_9580(struct mvumi_hba *mhba, unsigned int *cur_obf, unsigned int *assign_obf_end); 555 void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba); 609 void mvumi_reset(struct mvumi_hba *mhba); 620 unsigned char mvumi_start(struct mvumi_hba *mhba); 622 int mvumi_wait_for_outstanding(struct mvumi_hba *mhba); 633 int mvumi_wait_for_fw(struct mvumi_hba *mhba); 658 void mvumi_backup_bar_addr(struct mvumi_hba *mhba); 668 void mvumi_restore_bar_addr(struct mvumi_hba *mhba); 679 unsigned int mvumi_pci_set_master(struct pci_dev *pdev); 693 int mvumi_reset_host_9580(struct mvumi_hba *mhba); 720 int mvumi_reset_host_9143(struct mvumi_hba *mhba); 725 int mvumi_host_reset(struct scsi_cmnd *scmd); 737 int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd); 782 void mvumi_release_fw(struct mvumi_hba *mhba); 793 unsigned char mvumi_flush_cache(struct mvumi_hba *mhba); 842 unsigned char mvumi_calculate_checksum(struct mvumi_hs_header *p_header, unsigned short len); 857 void mvumi_hs_build_page(struct mvumi_hba *mhba, struct mvumi_hs_header *hs_header); 935 int mvumi_init_data(struct mvumi_hba *mhba); 1064 int mvumi_hs_process_page(struct mvumi_hba *mhba, struct mvumi_hs_header *hs_header); 1118 int mvumi_handshake(struct mvumi_hba *mhba); 1237 unsigned char mvumi_handshake_event(struct mvumi_hba *mhba); 1265 unsigned char mvumi_check_handshake(struct mvumi_hba *mhba); 1326 void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, struct mvumi_rsp_frame *ob_frame); 1373 void mvumi_complete_internal_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, struct mvumi_rsp_frame *ob_frame); 1391 void mvumi_show_event(struct mvumi_hba *mhba, struct mvumi_driver_event *ptr); 1417 int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status); 1449 u64 mvumi_inquiry(struct mvumi_hba *mhba, unsigned int id, struct mvumi_cmd *cmd); 1499 void mvumi_detach_devices(struct mvumi_hba *mhba); 1535 void mvumi_rescan_devices(struct mvumi_hba *mhba, int id); 1546 int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid); 1569 void mvumi_remove_devices(struct mvumi_hba *mhba, int id); 1586 int mvumi_probe_devices(struct mvumi_hba *mhba); 1643 int mvumi_rescan_bus(void *data); 1683 void mvumi_proc_msg(struct mvumi_hba *mhba, struct mvumi_hotplug_event *param); 1715 void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer); 1737 int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg); 1769 void mvumi_scan_events(struct work_struct *work); 1778 void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status); 1802 void mvumi_handle_clob(struct mvumi_hba *mhba); 1828 irqreturn_t mvumi_isr_handler(int irq, void *devp); 1860 enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, struct mvumi_cmd *cmd); 1898 void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd); 1936 void mvumi_enable_intr(struct mvumi_hba *mhba); 1951 void mvumi_disable_intr(struct mvumi_hba *mhba); 1963 int mvumi_clear_intr(void *extend); 2009 unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba); 2019 struct 
mvumi_instance_template mvumi_instance_9143 = { &mvumi_fire_cmd, &mvumi_enable_intr, &mvumi_disable_intr, &mvumi_clear_intr, &mvumi_read_fw_status_reg, &mvumi_check_ib_list_9143, &mvumi_check_ob_list_9143, &mvumi_reset_host_9143 }; 2030 struct mvumi_instance_template mvumi_instance_9580 = { &mvumi_fire_cmd, &mvumi_enable_intr, &mvumi_disable_intr, &mvumi_clear_intr, &mvumi_read_fw_status_reg, &mvumi_check_ib_list_9580, &mvumi_check_ob_list_9580, &mvumi_reset_host_9580 }; 2041 int mvumi_slave_configure(struct scsi_device *sdev); 2063 unsigned char mvumi_build_frame(struct mvumi_hba *mhba, struct scsi_cmnd *scmd, struct mvumi_cmd *cmd); 2120 int mvumi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd); 2154 enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd); 2196 int mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *geom); 2223 struct scsi_host_template mvumi_template = { &__this_module, "Marvell Storage Controller", 0, 0, 0, 0, 0, &mvumi_queue_command, 0, 0, 0, 0, &mvumi_host_reset, 0, &mvumi_slave_configure, 0, 0, 0, 0, 0, 0, 0, &mvumi_bios_param, 0, 0, 0, &mvumi_timed_out, 0, 0, 0, 0, -1, (unsigned short)0, (unsigned short)0, 0U, 0UL, (short)0, 0U, 0, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 0, 0, { 0, 0 }, 0ULL, 0U, 0 }; 2235 int mvumi_cfg_hw_reg(struct mvumi_hba *mhba); 2359 int mvumi_init_fw(struct mvumi_hba *mhba); 2436 int mvumi_io_attach(struct mvumi_hba *mhba); 2503 int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id); 2594 void mvumi_detach_one(struct pci_dev *pdev); 2622 void mvumi_shutdown(struct pci_dev *pdev); 2629 int mvumi_suspend(struct pci_dev *pdev, pm_message_t state); 2648 int mvumi_resume(struct pci_dev *pdev); 2717 struct pci_driver mvumi_pci_driver = { { 0, 0 }, "mvumi", (const struct pci_device_id *)(&mvumi_pci_table), &mvumi_probe_one, &mvumi_detach_one, &mvumi_suspend, 0, 0, &mvumi_resume, &mvumi_shutdown, 0, 0, { 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { { { { { { 0 } }, 0U, 0U, 0, { 0, { 0, 0 }, 0, 0, 0UL } } } }, { 0, 0 } } }; 2733 int mvumi_init(); 2741 void mvumi_exit(); 2766 void ldv_check_final_state(); 2769 void ldv_check_return_value(int); 2772 void ldv_check_return_value_probe(int); 2775 void ldv_initialize(); 2778 void ldv_handler_precall(); 2781 int nondet_int(); 2784 int LDV_IN_INTERRUPT = 0; 2787 void ldv_main0_sequence_infinite_withcheck_stateful(); 10 void ldv_error(); 14 void * ldv_err_ptr(long error); 21 long int ldv_ptr_err(const void *ptr); 28 bool ldv_is_err_or_null(const void *ptr); 5 int LDV_DMA_MAP_CALLS = 0; 16 void ldv_dma_mapping_error(); return ; } { 2789 struct mvumi_hba *var_group1; 2790 struct mvumi_cmd *var_group2; 2791 void *var_mvumi_clear_intr_66_p0; 2792 unsigned int *var_mvumi_check_ob_list_9143_21_p1; 2793 unsigned int *var_mvumi_check_ob_list_9143_21_p2; 2794 unsigned int *var_mvumi_check_ob_list_9580_22_p1; 2795 unsigned int *var_mvumi_check_ob_list_9580_22_p2; 2796 struct scsi_device *var_group3; 2797 struct Scsi_Host *var_group4; 2798 struct scsi_cmnd *var_group5; 2799 struct pci_dev *var_group6; 2800 const struct pci_device_id *var_mvumi_probe_one_75_p1; 2801 int res_mvumi_probe_one_75; 2802 struct pm_message var_mvumi_suspend_78_p1; 2803 int var_mvumi_isr_handler_61_p0; 2804 void *var_mvumi_isr_handler_61_p1; 2805 int ldv_s_mvumi_pci_driver_pci_driver; 2806 int tmp; 2807 int tmp___0; 2808 int tmp___1; 3024 ldv_s_mvumi_pci_driver_pci_driver = 0; 2997 LDV_IN_INTERRUPT = 1; 3006 ldv_initialize() { /* Function call is 
skipped due to function is undefined */} 3015 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { 2735 int tmp; 2735 tmp = __pci_register_driver(&mvumi_pci_driver, &__this_module, "mvumi") { /* Function call is skipped due to function is undefined */} } 3030 goto ldv_40793; 3030 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */} 3033 goto ldv_40792; 3031 ldv_40792:; 3034 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 3034 switch (tmp___0); 3401 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { 2122 struct mvumi_cmd *cmd; 2123 struct mvumi_hba *mhba; 2124 unsigned long irq_flags; 2125 raw_spinlock_t *tmp; 2126 long tmp___0; 2127 unsigned char tmp___1; 2128 long tmp___2; { 288 return &(lock->__annonCompField20.rlock);; } 2127 irq_flags = _raw_spin_lock_irqsave(tmp) { /* Function call is skipped due to function is undefined */} 2128 scsi_cmd_get_serial(shost, scmd) { /* Function call is skipped due to function is undefined */} 2130 mhba = (struct mvumi_hba *)(&(shost->hostdata)); 2131 scmd->result = 0; { 346 struct mvumi_cmd *cmd; 347 const struct list_head *__mptr; 348 int tmp; 349 long tmp___0; 346 cmd = (struct mvumi_cmd *)0; { 202 union __anonunion___u_15 __u; { 254 switch (size); 255 assume(!(size == 1)); 255 assume(!(size == 2)); 255 assume(!(size == 4)); 255 assume(size == 8); 254 *((__u64 *)res) = *((volatile __u64 *)p); 254 goto ldv_883; 256 return ;; } 202 return ((unsigned long)((const struct list_head *)(__u.__val))) == ((unsigned long)head);; } 348 tmp___0 = __builtin_expect(tmp == 0, 1L) { /* Function call is skipped due to function is undefined */} 353 dev_warn((const struct device *)(&(mhba->pdev->dev)), "command pool is empty!\n") { /* Function call is skipped due to function is undefined */} } 2133 tmp___0 = __builtin_expect(((unsigned long)cmd) == ((unsigned long)((struct mvumi_cmd *)0)), 0L) { /* Function call is skipped due to function is undefined */} { 2066 struct mvumi_msg_frame *pframe; 2067 int tmp; 2068 unsigned int tmp___0; 2068 cmd->scmd = scmd; 2069 cmd->cmd_status = 128U; 2070 pframe = cmd->frame; 2071 short __CPAchecker_TMP_0 = (short)(scmd->device->id); 2071 unsigned short __CPAchecker_TMP_1 = (unsigned short)(scmd->device->lun); 2071 pframe->device_id = (u16 )(((int)__CPAchecker_TMP_0) | ((int)((short)(((int)__CPAchecker_TMP_1) << 8)))); 2073 pframe->cmd_flag = 0U; 2075 unsigned int __CPAchecker_TMP_2 = (unsigned int)(scmd->sc_data_direction); 2075 switch (__CPAchecker_TMP_2); 2083 unsigned int __CPAchecker_TMP_5 = (unsigned int)(pframe->cmd_flag); 2083 pframe->cmd_flag = (u8 )(__CPAchecker_TMP_5 | 16U); 2084 goto ldv_40570; 2091 ldv_40570:; 2092 u8 __CPAchecker_TMP_7 = (u8 )(scmd->cmd_len); 2092 pframe->cdb_length = __CPAchecker_TMP_7; 2093 const void *__CPAchecker_TMP_8 = (const void *)(scmd->cmnd); 2093 size_t __CPAchecker_TMP_9 = (size_t )(pframe->cdb_length); 2093 __memcpy((void *)(&(pframe->cdb)), __CPAchecker_TMP_8, __CPAchecker_TMP_9) { /* Function call is skipped due to function is undefined */} 2094 pframe->req_function = 1U; { 185 return cmd->sdb.length;; } { 208 struct scatterlist *sg; 209 struct mvumi_sgl *m_sg; 210 unsigned int i; 211 unsigned int sgnum; 212 unsigned int tmp; 213 unsigned long long busaddr; 214 int tmp___0; 215 unsigned int tmp___2; 216 struct scatterlist *tmp___3; 217 unsigned long long tmp___4; 218 unsigned int tmp___5; 219 unsigned int tmp___6; 209 m_sg = (struct mvumi_sgl *)sgl_p; 
{ 175 return cmd->sdb.table.nents;; } 211 sgnum = tmp; { 185 return cmd->sdb.length;; } { 185 return cmd->sdb.length;; } { 180 return cmd->sdb.table.sgl;; } 236 int __CPAchecker_TMP_6 = (int)(scmd->sc_data_direction); { 41 unsigned long long tmp; 40 struct device *__CPAchecker_TMP_0; 40 assume(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0))); 40 __CPAchecker_TMP_0 = &(hwdev->dev); { 38 unsigned long long tmp; { } 204 const struct dma_map_ops *ops; 205 const struct dma_map_ops *tmp; 206 unsigned long long addr; 207 int tmp___0; 208 long tmp___1; 209 unsigned long tmp___2; 210 unsigned long tmp___3; { 177 const struct dma_map_ops *tmp; 177 assume(((unsigned long)dev) != ((unsigned long)((struct device *)0))); 177 unsigned long __CPAchecker_TMP_0 = (unsigned long)(dev->dma_ops); 177 assume(!(__CPAchecker_TMP_0 != ((unsigned long)((const struct dma_map_ops *)0)))); 179 struct bus_type *__CPAchecker_TMP_1; 179 assume(((unsigned long)dev) != ((unsigned long)((struct device *)0))); 179 __CPAchecker_TMP_1 = dev->bus; { 32 return dma_ops;; } 179 return tmp;; } 205 ops = tmp; { 133 return ;; } { 146 int __CPAchecker_TMP_0; 146 assume(!(dma_direction == 0)); 146 assume(dma_direction == 1); __CPAchecker_TMP_0 = 1; 146 return __CPAchecker_TMP_0;; } 209 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */} 209 assume(!(tmp___1 != 0L)); 210 tmp___2 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */} 210 addr = (*(ops->map_page))(dev, (struct page *)((tmp___2 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, dir, attrs); 213 tmp___3 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */} 213 debug_dma_map_page(dev, (struct page *)((tmp___3 >> 12) + vmemmap_base), ((unsigned long)ptr) & 4095UL, size, (int)dir, addr, 1) { /* Function call is skipped due to function is undefined */} 216 return addr;; } 40 return tmp;; } 236 scmd->SCp.dma_handle = tmp___4; 241 busaddr = scmd->SCp.dma_handle; 242 m_sg->baseaddr_l = (unsigned int)busaddr; 243 m_sg->baseaddr_h = (unsigned int)(busaddr >> 32ULL); 244 int __CPAchecker_TMP_7 = (int)(mhba->eot_flag); 244 m_sg->flags = 1U << __CPAchecker_TMP_7; 245 unsigned int __CPAchecker_TMP_8 = (unsigned int)(mhba->hba_capability); { 185 return cmd->sdb.length;; } 246 *sg_count = 1U; } { } 185 return cmd->sdb.length;; } 2138 tmp___2 = __builtin_expect(((unsigned int)tmp___1) != 0U, 0L) { /* Function call is skipped due to function is undefined */} 2141 cmd->scmd = scmd; 2142 scmd->SCp.ptr = (char *)cmd; 2143 (*(mhba->instancet->fire_cmd))(mhba, cmd); { } 354 _raw_spin_unlock_irqrestore(&(lock->__annonCompField20.rlock), flags) { /* Function call is skipped due to function is undefined */} 355 return ;; } 3413 goto ldv_40764; 3587 ldv_40764:; 3588 ldv_40793:; 3030 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */} 3033 goto ldv_40792; 3031 ldv_40792:; 3034 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 3034 switch (tmp___0); 3401 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { } 2122 struct mvumi_cmd *cmd; 2123 struct mvumi_hba *mhba; 2124 unsigned long irq_flags; 2125 raw_spinlock_t *tmp; 2126 long tmp___0; 2127 unsigned char tmp___1; 2128 long tmp___2; { 288 return &(lock->__annonCompField20.rlock);; } 2127 irq_flags = _raw_spin_lock_irqsave(tmp) { /* Function call is skipped due to function is 
undefined */} 2128 scsi_cmd_get_serial(shost, scmd) { /* Function call is skipped due to function is undefined */} 2130 mhba = (struct mvumi_hba *)(&(shost->hostdata)); 2131 scmd->result = 0; { 346 struct mvumi_cmd *cmd; 347 const struct list_head *__mptr; 348 int tmp; 349 long tmp___0; 346 cmd = (struct mvumi_cmd *)0; { 202 union __anonunion___u_15 __u; { 254 switch (size); 255 assume(!(size == 1)); 255 assume(!(size == 2)); 255 assume(!(size == 4)); 255 assume(size == 8); 254 *((__u64 *)res) = *((volatile __u64 *)p); 254 goto ldv_883; 256 return ;; } 202 return ((unsigned long)((const struct list_head *)(__u.__val))) == ((unsigned long)head);; } 348 tmp___0 = __builtin_expect(tmp == 0, 1L) { /* Function call is skipped due to function is undefined */} 353 dev_warn((const struct device *)(&(mhba->pdev->dev)), "command pool is empty!\n") { /* Function call is skipped due to function is undefined */} } 2133 tmp___0 = __builtin_expect(((unsigned long)cmd) == ((unsigned long)((struct mvumi_cmd *)0)), 0L) { /* Function call is skipped due to function is undefined */} { } 2066 struct mvumi_msg_frame *pframe; 2067 int tmp; 2068 unsigned int tmp___0; 2068 cmd->scmd = scmd; 2069 cmd->cmd_status = 128U; 2070 pframe = cmd->frame; 2071 short __CPAchecker_TMP_0 = (short)(scmd->device->id); 2071 unsigned short __CPAchecker_TMP_1 = (unsigned short)(scmd->device->lun); 2071 pframe->device_id = (u16 )(((int)__CPAchecker_TMP_0) | ((int)((short)(((int)__CPAchecker_TMP_1) << 8)))); 2073 pframe->cmd_flag = 0U; 2075 unsigned int __CPAchecker_TMP_2 = (unsigned int)(scmd->sc_data_direction); 2075 switch (__CPAchecker_TMP_2); 2083 unsigned int __CPAchecker_TMP_5 = (unsigned int)(pframe->cmd_flag); 2083 pframe->cmd_flag = (u8 )(__CPAchecker_TMP_5 | 16U); 2084 goto ldv_40570; 2091 ldv_40570:; 2092 u8 __CPAchecker_TMP_7 = (u8 )(scmd->cmd_len); 2092 pframe->cdb_length = __CPAchecker_TMP_7; 2093 const void *__CPAchecker_TMP_8 = (const void *)(scmd->cmnd); 2093 size_t __CPAchecker_TMP_9 = (size_t )(pframe->cdb_length); 2093 __memcpy((void *)(&(pframe->cdb)), __CPAchecker_TMP_8, __CPAchecker_TMP_9) { /* Function call is skipped due to function is undefined */} 2094 pframe->req_function = 1U; { 185 return cmd->sdb.length;; } { } 208 struct scatterlist *sg; 209 struct mvumi_sgl *m_sg; 210 unsigned int i; 211 unsigned int sgnum; 212 unsigned int tmp; 213 unsigned long long busaddr; 214 int tmp___0; 215 unsigned int tmp___2; 216 struct scatterlist *tmp___3; 217 unsigned long long tmp___4; 218 unsigned int tmp___5; 219 unsigned int tmp___6; 209 m_sg = (struct mvumi_sgl *)sgl_p; { 175 return cmd->sdb.table.nents;; } 211 sgnum = tmp; { 185 return cmd->sdb.length;; } { 185 return cmd->sdb.length;; } { 180 return cmd->sdb.table.sgl;; } 236 int __CPAchecker_TMP_6 = (int)(scmd->sc_data_direction); { } 41 unsigned long long tmp; 40 struct device *__CPAchecker_TMP_0; 40 assume(((unsigned long)hwdev) != ((unsigned long)((struct pci_dev *)0))); 40 __CPAchecker_TMP_0 = &(hwdev->dev); } | Source code
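The excerpt of the trace above ends inside a second pass through mvumi_queue_command(): both passes reach pci_map_single() by way of mvumi_make_sgl(), and, as far as the excerpt shows, the handle produced by the first pass is never reported back to the verifier's model before the second mapping is made. The model itself (LDV_DMA_MAP_CALLS, ldv_dma_map_page(), ldv_dma_mapping_error() and ldv_check_final_state(), all declared near the start of the trace) is not reproduced in this report; a minimal sketch of what the standard LDV dma-mapping instrumentation is assumed to look like:

/* Hypothetical sketch of the LDV dma-mapping model; the real bodies are
 * generated by the rule and are not shown in this trace. */
void ldv_error(void);                    /* verifier error sink, declared in the trace */

int LDV_DMA_MAP_CALLS = 0;

void ldv_dma_map_page(void)
{
        /* entered from the wrapped dma_map_single_attrs()/dma_map_page() calls */
        if (LDV_DMA_MAP_CALLS != 0)
                ldv_error();             /* previous mapping was never checked */
        LDV_DMA_MAP_CALLS++;
}

void ldv_dma_mapping_error(void)
{
        /* entered when the driver checks a handle with (pci_)dma_mapping_error() */
        if (LDV_DMA_MAP_CALLS == 0)
                ldv_error();             /* nothing outstanding to check */
        LDV_DMA_MAP_CALLS--;
}

void ldv_check_final_state(void)
{
        /* called by the harness after the module exit path */
        if (LDV_DMA_MAP_CALLS != 0)
                ldv_error();             /* an unchecked mapping survived */
}

Under a model of this shape, the second pci_map_single() in the trace, issued while the first handle was never passed to pci_dma_mapping_error()/dma_mapping_error(), is what would trip the rule.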
1 #ifndef _ASM_X86_DMA_MAPPING_H
2 #define _ASM_X86_DMA_MAPPING_H
3
4 /*
5 * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
6 * Documentation/DMA-API.txt for documentation.
7 */
8
9 #include <linux/kmemcheck.h>
10 #include <linux/scatterlist.h>
11 #include <linux/dma-debug.h>
12 #include <asm/io.h>
13 #include <asm/swiotlb.h>
14 #include <linux/dma-contiguous.h>
15
16 #ifdef CONFIG_ISA
17 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
18 #else
19 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
20 #endif
21
22 #define DMA_ERROR_CODE 0
23
24 extern int iommu_merge;
25 extern struct device x86_dma_fallback_dev;
26 extern int panic_on_overflow;
27
28 extern const struct dma_map_ops *dma_ops;
29
30 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
31 {
32 return dma_ops;
33 }
34
35 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
36 #define arch_dma_alloc_attrs arch_dma_alloc_attrs
37
38 #define HAVE_ARCH_DMA_SUPPORTED 1
39 extern int dma_supported(struct device *hwdev, u64 mask);
40
41 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
42 dma_addr_t *dma_addr, gfp_t flag,
43 unsigned long attrs);
44
45 extern void dma_generic_free_coherent(struct device *dev, size_t size,
46 void *vaddr, dma_addr_t dma_addr,
47 unsigned long attrs);
48
49 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
50 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
51 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
52 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
53 #else
54
55 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
56 {
57 if (!dev->dma_mask)
58 return 0;
59
60 return addr + size - 1 <= *dev->dma_mask;
61 }
62
63 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
64 {
65 return paddr;
66 }
67
68 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
69 {
70 return daddr;
71 }
72 #endif /* CONFIG_X86_DMA_REMAP */
73
74 static inline void
75 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
76 enum dma_data_direction dir)
77 {
78 flush_write_buffers();
79 }
80
81 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
82 gfp_t gfp)
83 {
84 unsigned long dma_mask = 0;
85
86 dma_mask = dev->coherent_dma_mask;
87 if (!dma_mask)
88 dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
89
90 return dma_mask;
91 }
92
93 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
94 {
95 unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
96
97 if (dma_mask <= DMA_BIT_MASK(24))
98 gfp |= GFP_DMA;
99 #ifdef CONFIG_X86_64
100 if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
101 gfp |= GFP_DMA32;
102 #endif
103 return gfp;
104 }
105
106 #endif
1
2 /*
3 * Marvell UMI driver
4 *
5 * Copyright 2011 Marvell. <jyli@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23 */
24
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/moduleparam.h>
28 #include <linux/init.h>
29 #include <linux/device.h>
30 #include <linux/pci.h>
31 #include <linux/list.h>
32 #include <linux/spinlock.h>
33 #include <linux/interrupt.h>
34 #include <linux/delay.h>
35 #include <linux/ktime.h>
36 #include <linux/blkdev.h>
37 #include <linux/io.h>
38 #include <scsi/scsi.h>
39 #include <scsi/scsi_cmnd.h>
40 #include <scsi/scsi_device.h>
41 #include <scsi/scsi_host.h>
42 #include <scsi/scsi_transport.h>
43 #include <scsi/scsi_eh.h>
44 #include <linux/uaccess.h>
45 #include <linux/kthread.h>
46
47 #include "mvumi.h"
48
49 MODULE_LICENSE("GPL");
50 MODULE_AUTHOR("jyli@marvell.com");
51 MODULE_DESCRIPTION("Marvell UMI Driver");
52
53 static const struct pci_device_id mvumi_pci_table[] = {
54 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
55 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
56 { 0 }
57 };
58
59 MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
60
61 static void tag_init(struct mvumi_tag *st, unsigned short size)
62 {
63 unsigned short i;
64 BUG_ON(size != st->size);
65 st->top = size;
66 for (i = 0; i < size; i++)
67 st->stack[i] = size - 1 - i;
68 }
69
70 static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
71 {
72 BUG_ON(st->top <= 0);
73 return st->stack[--st->top];
74 }
75
76 static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
77 unsigned short tag)
78 {
79 BUG_ON(st->top >= st->size);
80 st->stack[st->top++] = tag;
81 }
82
83 static bool tag_is_empty(struct mvumi_tag *st)
84 {
85 if (st->top == 0)
86 return 1;
87 else
88 return 0;
89 }
90
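/*
 * Editor's illustration (not part of mvumi.c): the four helpers above implement
 * a LIFO pool of free tags. The driver allocates st->stack and sets st->size
 * elsewhere in this file before calling tag_init(); the hypothetical helper
 * below only shows the intended round trip of a single tag.
 */
static unsigned short mvumi_tag_roundtrip_demo(struct mvumi_hba *mhba,
                                               struct mvumi_tag *st)
{
        unsigned short tag;

        tag_init(st, st->size);         /* pushes tags size-1 .. 0, so top == size */
        tag = tag_get_one(mhba, st);    /* pops one free tag; BUG_ON() if pool empty */
        /* ... use it, e.g. as an index into mhba->tag_cmd[] ... */
        tag_release_one(mhba, st, tag); /* push it back once the command completes */
        return tag;
}
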
91 static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
92 {
93 int i;
94
95 for (i = 0; i < MAX_BASE_ADDRESS; i++)
96 if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
97 addr_array[i])
98 pci_iounmap(dev, addr_array[i]);
99 }
100
101 static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
102 {
103 int i;
104
105 for (i = 0; i < MAX_BASE_ADDRESS; i++) {
106 if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
107 addr_array[i] = pci_iomap(dev, i, 0);
108 if (!addr_array[i]) {
109 dev_err(&dev->dev, "failed to map Bar[%d]\n",
110 i);
111 mvumi_unmap_pci_addr(dev, addr_array);
112 return -ENOMEM;
113 }
114 } else
115 addr_array[i] = NULL;
116
117 dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
118 }
119
120 return 0;
121 }
122
123 static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
124 enum resource_type type, unsigned int size)
125 {
126 struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
127
128 if (!res) {
129 dev_err(&mhba->pdev->dev,
130 "Failed to allocate memory for resource manager.\n");
131 return NULL;
132 }
133
134 switch (type) {
135 case RESOURCE_CACHED_MEMORY:
136 res->virt_addr = kzalloc(size, GFP_ATOMIC);
137 if (!res->virt_addr) {
138 dev_err(&mhba->pdev->dev,
139 "unable to allocate memory,size = %d.\n", size);
140 kfree(res);
141 return NULL;
142 }
143 break;
144
145 case RESOURCE_UNCACHED_MEMORY:
146 size = round_up(size, 8);
147 res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
148 &res->bus_addr);
149 if (!res->virt_addr) {
150 dev_err(&mhba->pdev->dev,
151 "unable to allocate consistent mem,"
152 "size = %d.\n", size);
153 kfree(res);
154 return NULL;
155 }
156 break;
157
158 default:
159 dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
160 kfree(res);
161 return NULL;
162 }
163
164 res->type = type;
165 res->size = size;
166 INIT_LIST_HEAD(&res->entry);
167 list_add_tail(&res->entry, &mhba->res_list);
168
169 return res;
170 }
171
172 static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
173 {
174 struct mvumi_res *res, *tmp;
175
176 list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
177 switch (res->type) {
178 case RESOURCE_UNCACHED_MEMORY:
179 pci_free_consistent(mhba->pdev, res->size,
180 res->virt_addr, res->bus_addr);
181 break;
182 case RESOURCE_CACHED_MEMORY:
183 kfree(res->virt_addr);
184 break;
185 default:
186 dev_err(&mhba->pdev->dev,
187 "unknown resource type %d\n", res->type);
188 break;
189 }
190 list_del(&res->entry);
191 kfree(res);
192 }
193 mhba->fw_flag &= ~MVUMI_FW_ALLOC;
194 }
195
196 /**
197 * mvumi_make_sgl - Prepares SGL
198 * @mhba: Adapter soft state
199 * @scmd: SCSI command from the mid-layer
200 * @sgl_p: SGL to be filled in
201 * @sg_count return the number of SG elements
202 *
203 * If successful, this function returns 0. otherwise, it returns -1.
204 */
205 static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
206 void *sgl_p, unsigned char *sg_count)
207 {
208 struct scatterlist *sg;
209 struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
210 unsigned int i;
211 unsigned int sgnum = scsi_sg_count(scmd);
212 dma_addr_t busaddr;
213
214 if (sgnum) {
215 sg = scsi_sglist(scmd);
216 *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
217 (int) scmd->sc_data_direction);
218 if (*sg_count > mhba->max_sge) {
219 dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
220 "than max sg[0x%x].\n",
221 *sg_count, mhba->max_sge);
222 return -1;
223 }
224 for (i = 0; i < *sg_count; i++) {
225 busaddr = sg_dma_address(&sg[i]);
226 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
227 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
228 m_sg->flags = 0;
229 sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
230 if ((i + 1) == *sg_count)
231 m_sg->flags |= 1U << mhba->eot_flag;
232
233 sgd_inc(mhba, m_sg);
234 }
235 } else {
236 scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
237 pci_map_single(mhba->pdev, scsi_sglist(scmd),
238 scsi_bufflen(scmd),
239 (int) scmd->sc_data_direction)
240 : 0;
241 busaddr = scmd->SCp.dma_handle;
242 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
243 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
244 m_sg->flags = 1U << mhba->eot_flag;
245 sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd)));
246 *sg_count = 1;
247 }
248
249 return 0;
250 }
251
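/*
 * Editor's illustration (not part of mvumi.c): in the single-buffer branch of
 * mvumi_make_sgl() above, the result of pci_map_single() is stored and used
 * without being validated. The dma-mapping model in the error trace counts such
 * mappings; checking the handle before use, with the same compat API the driver
 * already relies on, would look roughly like this hypothetical helper.
 */
static int mvumi_map_single_checked(struct mvumi_hba *mhba,
                                    struct scsi_cmnd *scmd,
                                    dma_addr_t *handle)
{
        *handle = pci_map_single(mhba->pdev, scsi_sglist(scmd),
                                 scsi_bufflen(scmd),
                                 (int) scmd->sc_data_direction);
        if (pci_dma_mapping_error(mhba->pdev, *handle))
                return -1;      /* fail the command instead of using a bad handle */
        return 0;
}
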
252 static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
253 unsigned int size)
254 {
255 struct mvumi_sgl *m_sg;
256 void *virt_addr;
257 dma_addr_t phy_addr;
258
259 if (size == 0)
260 return 0;
261
262 virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
263 if (!virt_addr)
264 return -1;
265
266 m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
267 cmd->frame->sg_counts = 1;
268 cmd->data_buf = virt_addr;
269
270 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
271 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
272 m_sg->flags = 1U << mhba->eot_flag;
273 sgd_setsz(mhba, m_sg, cpu_to_le32(size));
274
275 return 0;
276 }
277
278 static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
279 unsigned int buf_size)
280 {
281 struct mvumi_cmd *cmd;
282
283 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
284 if (!cmd) {
285 dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n");
286 return NULL;
287 }
288 INIT_LIST_HEAD(&cmd->queue_pointer);
289
290 cmd->frame = pci_alloc_consistent(mhba->pdev,
291 mhba->ib_max_size, &cmd->frame_phys);
292 if (!cmd->frame) {
293 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
294 " frame,size = %d.\n", mhba->ib_max_size);
295 kfree(cmd);
296 return NULL;
297 }
298
299 if (buf_size) {
300 if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
301 dev_err(&mhba->pdev->dev, "failed to allocate memory"
302 " for internal frame\n");
303 pci_free_consistent(mhba->pdev, mhba->ib_max_size,
304 cmd->frame, cmd->frame_phys);
305 kfree(cmd);
306 return NULL;
307 }
308 } else
309 cmd->frame->sg_counts = 0;
310
311 return cmd;
312 }
313
314 static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
315 struct mvumi_cmd *cmd)
316 {
317 struct mvumi_sgl *m_sg;
318 unsigned int size;
319 dma_addr_t phy_addr;
320
321 if (cmd && cmd->frame) {
322 if (cmd->frame->sg_counts) {
323 m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
324 sgd_getsz(mhba, m_sg, size);
325
326 phy_addr = (dma_addr_t) m_sg->baseaddr_l |
327 (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
328
329 pci_free_consistent(mhba->pdev, size, cmd->data_buf,
330 phy_addr);
331 }
332 pci_free_consistent(mhba->pdev, mhba->ib_max_size,
333 cmd->frame, cmd->frame_phys);
334 kfree(cmd);
335 }
336 }
337
338 /**
339 * mvumi_get_cmd - Get a command from the free pool
340 * @mhba: Adapter soft state
341 *
342 * Returns a free command from the pool
343 */
344 static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
345 {
346 struct mvumi_cmd *cmd = NULL;
347
348 if (likely(!list_empty(&mhba->cmd_pool))) {
349 cmd = list_entry((&mhba->cmd_pool)->next,
350 struct mvumi_cmd, queue_pointer);
351 list_del_init(&cmd->queue_pointer);
352 } else
353 dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
354
355 return cmd;
356 }
357
358 /**
359 * mvumi_return_cmd - Return a cmd to free command pool
360 * @mhba: Adapter soft state
361 * @cmd: Command packet to be returned to free command pool
362 */
363 static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
364 struct mvumi_cmd *cmd)
365 {
366 cmd->scmd = NULL;
367 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
368 }
369
370 /**
371 * mvumi_free_cmds - Free all the cmds in the free cmd pool
372 * @mhba: Adapter soft state
373 */
374 static void mvumi_free_cmds(struct mvumi_hba *mhba)
375 {
376 struct mvumi_cmd *cmd;
377
378 while (!list_empty(&mhba->cmd_pool)) {
379 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
380 queue_pointer);
381 list_del(&cmd->queue_pointer);
382 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
383 kfree(cmd->frame);
384 kfree(cmd);
385 }
386 }
387
388 /**
389 * mvumi_alloc_cmds - Allocates the command packets
390 * @mhba: Adapter soft state
391 *
392 */
393 static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
394 {
395 int i;
396 struct mvumi_cmd *cmd;
397
398 for (i = 0; i < mhba->max_io; i++) {
399 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
400 if (!cmd)
401 goto err_exit;
402
403 INIT_LIST_HEAD(&cmd->queue_pointer);
404 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
405 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
406 cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
407 cmd->frame_phys = mhba->ib_frame_phys
408 + i * mhba->ib_max_size;
409 } else
410 cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
411 if (!cmd->frame)
412 goto err_exit;
413 }
414 return 0;
415
416 err_exit:
417 dev_err(&mhba->pdev->dev,
418 "failed to allocate memory for cmd[0x%x].\n", i);
419 while (!list_empty(&mhba->cmd_pool)) {
420 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
421 queue_pointer);
422 list_del(&cmd->queue_pointer);
423 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
424 kfree(cmd->frame);
425 kfree(cmd);
426 }
427 return -ENOMEM;
428 }
429
430 static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
431 {
432 unsigned int ib_rp_reg;
433 struct mvumi_hw_regs *regs = mhba->regs;
434
435 ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
436
437 if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
438 (mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
439 ((ib_rp_reg & regs->cl_pointer_toggle)
440 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
441 dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
442 return 0;
443 }
444 if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
445 dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
446 return 0;
447 } else {
448 return mhba->max_io - atomic_read(&mhba->fw_outstanding);
449 }
450 }
451
452 static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
453 {
454 unsigned int count;
455 if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
456 return 0;
457 count = ioread32(mhba->ib_shadow);
458 if (count == 0xffff)
459 return 0;
460 return count;
461 }
462
463 static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
464 {
465 unsigned int cur_ib_entry;
466
467 cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
468 cur_ib_entry++;
469 if (cur_ib_entry >= mhba->list_num_io) {
470 cur_ib_entry -= mhba->list_num_io;
471 mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
472 }
473 mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
474 mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
475 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
476 *ib_entry = mhba->ib_list + cur_ib_entry *
477 sizeof(struct mvumi_dyn_list_entry);
478 } else {
479 *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
480 }
481 atomic_inc(&mhba->fw_outstanding);
482 }
483
484 static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
485 {
486 iowrite32(0xffff, mhba->ib_shadow);
487 iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
488 }
489
490 static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
491 unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
492 {
493 unsigned short tag, request_id;
494
495 udelay(1);
496 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
497 request_id = p_outb_frame->request_id;
498 tag = p_outb_frame->tag;
499 if (tag > mhba->tag_pool.size) {
500 dev_err(&mhba->pdev->dev, "ob frame data error\n");
501 return -1;
502 }
503 if (mhba->tag_cmd[tag] == NULL) {
504 dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
505 return -1;
506 } else if (mhba->tag_cmd[tag]->request_id != request_id &&
507 mhba->request_id_enabled) {
508 dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
509 "cmd request ID:0x%x\n", request_id,
510 mhba->tag_cmd[tag]->request_id);
511 return -1;
512 }
513
514 return 0;
515 }
516
517 static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
518 unsigned int *cur_obf, unsigned int *assign_obf_end)
519 {
520 unsigned int ob_write, ob_write_shadow;
521 struct mvumi_hw_regs *regs = mhba->regs;
522
523 do {
524 ob_write = ioread32(regs->outb_copy_pointer);
525 ob_write_shadow = ioread32(mhba->ob_shadow);
526 } while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
527
528 *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
529 *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
530
531 if ((ob_write & regs->cl_pointer_toggle) !=
532 (mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
533 *assign_obf_end += mhba->list_num_io;
534 }
535 return 0;
536 }
537
538 static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
539 unsigned int *cur_obf, unsigned int *assign_obf_end)
540 {
541 unsigned int ob_write;
542 struct mvumi_hw_regs *regs = mhba->regs;
543
544 ob_write = ioread32(regs->outb_read_pointer);
545 ob_write = ioread32(regs->outb_copy_pointer);
546 *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
547 *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
548 if (*assign_obf_end < *cur_obf)
549 *assign_obf_end += mhba->list_num_io;
550 else if (*assign_obf_end == *cur_obf)
551 return -1;
552 return 0;
553 }
554
555 static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
556 {
557 unsigned int cur_obf, assign_obf_end, i;
558 struct mvumi_ob_data *ob_data;
559 struct mvumi_rsp_frame *p_outb_frame;
560 struct mvumi_hw_regs *regs = mhba->regs;
561
562 if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
563 return;
564
565 for (i = (assign_obf_end - cur_obf); i != 0; i--) {
566 cur_obf++;
567 if (cur_obf >= mhba->list_num_io) {
568 cur_obf -= mhba->list_num_io;
569 mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
570 }
571
572 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
573
574 /* Copy pointer may point to entry in outbound list
575 * before entry has valid data
576 */
577 if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
578 mhba->tag_cmd[p_outb_frame->tag] == NULL ||
579 p_outb_frame->request_id !=
580 mhba->tag_cmd[p_outb_frame->tag]->request_id))
581 if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
582 continue;
583
584 if (!list_empty(&mhba->ob_data_list)) {
585 ob_data = (struct mvumi_ob_data *)
586 list_first_entry(&mhba->ob_data_list,
587 struct mvumi_ob_data, list);
588 list_del_init(&ob_data->list);
589 } else {
590 ob_data = NULL;
591 if (cur_obf == 0) {
592 cur_obf = mhba->list_num_io - 1;
593 mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
594 } else
595 cur_obf -= 1;
596 break;
597 }
598
599 memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
600 p_outb_frame->tag = 0xff;
601
602 list_add_tail(&ob_data->list, &mhba->free_ob_list);
603 }
604 mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
605 mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
606 iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
607 }
608
609 static void mvumi_reset(struct mvumi_hba *mhba)
610 {
611 struct mvumi_hw_regs *regs = mhba->regs;
612
613 iowrite32(0, regs->enpointa_mask_reg);
614 if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
615 return;
616
617 iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
618 }
619
620 static unsigned char mvumi_start(struct mvumi_hba *mhba);
621
622 static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
623 {
624 mhba->fw_state = FW_STATE_ABORT;
625 mvumi_reset(mhba);
626
627 if (mvumi_start(mhba))
628 return FAILED;
629 else
630 return SUCCESS;
631 }
632
633 static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
634 {
635 struct mvumi_hw_regs *regs = mhba->regs;
636 u32 tmp;
637 unsigned long before;
638 before = jiffies;
639
640 iowrite32(0, regs->enpointa_mask_reg);
641 tmp = ioread32(regs->arm_to_pciea_msg1);
642 while (tmp != HANDSHAKE_READYSTATE) {
643 iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
644 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
645 dev_err(&mhba->pdev->dev,
646 "FW reset failed [0x%x].\n", tmp);
647 return FAILED;
648 }
649
650 msleep(500);
651 rmb();
652 tmp = ioread32(regs->arm_to_pciea_msg1);
653 }
654
655 return SUCCESS;
656 }
657
658 static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
659 {
660 unsigned char i;
661
662 for (i = 0; i < MAX_BASE_ADDRESS; i++) {
663 pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
664 &mhba->pci_base[i]);
665 }
666 }
667
668 static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
669 {
670 unsigned char i;
671
672 for (i = 0; i < MAX_BASE_ADDRESS; i++) {
673 if (mhba->pci_base[i])
674 pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
675 mhba->pci_base[i]);
676 }
677 }
678
679 static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
680 {
681 unsigned int ret = 0;
682 pci_set_master(pdev);
683
684 if (IS_DMA64) {
685 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
686 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
687 } else
688 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
689
690 return ret;
691 }
692
693 static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
694 {
695 mhba->fw_state = FW_STATE_ABORT;
696
697 iowrite32(0, mhba->regs->reset_enable);
698 iowrite32(0xf, mhba->regs->reset_request);
699
700 iowrite32(0x10, mhba->regs->reset_enable);
701 iowrite32(0x10, mhba->regs->reset_request);
702 msleep(100);
703 pci_disable_device(mhba->pdev);
704
705 if (pci_enable_device(mhba->pdev)) {
706 dev_err(&mhba->pdev->dev, "enable device failed\n");
707 return FAILED;
708 }
709 if (mvumi_pci_set_master(mhba->pdev)) {
710 dev_err(&mhba->pdev->dev, "set master failed\n");
711 return FAILED;
712 }
713 mvumi_restore_bar_addr(mhba);
714 if (mvumi_wait_for_fw(mhba) == FAILED)
715 return FAILED;
716
717 return mvumi_wait_for_outstanding(mhba);
718 }
719
720 static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
721 {
722 return mvumi_wait_for_outstanding(mhba);
723 }
724
725 static int mvumi_host_reset(struct scsi_cmnd *scmd)
726 {
727 struct mvumi_hba *mhba;
728
729 mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
730
731 scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
732 scmd->serial_number, scmd->cmnd[0], scmd->retries);
733
734 return mhba->instancet->reset_host(mhba);
735 }
736
737 static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
738 struct mvumi_cmd *cmd)
739 {
740 unsigned long flags;
741
742 cmd->cmd_status = REQ_STATUS_PENDING;
743
744 if (atomic_read(&cmd->sync_cmd)) {
745 dev_err(&mhba->pdev->dev,
746 "last blocked cmd not finished, sync_cmd = %d\n",
747 atomic_read(&cmd->sync_cmd));
748 BUG_ON(1);
749 return -1;
750 }
751 atomic_inc(&cmd->sync_cmd);
752 spin_lock_irqsave(mhba->shost->host_lock, flags);
753 mhba->instancet->fire_cmd(mhba, cmd);
754 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
755
756 wait_event_timeout(mhba->int_cmd_wait_q,
757 (cmd->cmd_status != REQ_STATUS_PENDING),
758 MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
759
760 /* command timeout */
761 if (atomic_read(&cmd->sync_cmd)) {
762 spin_lock_irqsave(mhba->shost->host_lock, flags);
763 atomic_dec(&cmd->sync_cmd);
764 if (mhba->tag_cmd[cmd->frame->tag]) {
765 mhba->tag_cmd[cmd->frame->tag] = 0;
766 dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
767 cmd->frame->tag);
768 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
769 }
770 if (!list_empty(&cmd->queue_pointer)) {
771 dev_warn(&mhba->pdev->dev,
772 "TIMEOUT:A internal command doesn't send!\n");
773 list_del_init(&cmd->queue_pointer);
774 } else
775 atomic_dec(&mhba->fw_outstanding);
776
777 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
778 }
779 return 0;
780 }
781
782 static void mvumi_release_fw(struct mvumi_hba *mhba)
783 {
784 mvumi_free_cmds(mhba);
785 mvumi_release_mem_resource(mhba);
786 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
787 pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
788 mhba->handshake_page, mhba->handshake_page_phys);
789 kfree(mhba->regs);
790 pci_release_regions(mhba->pdev);
791 }
792
793 static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
794 {
795 struct mvumi_cmd *cmd;
796 struct mvumi_msg_frame *frame;
797 unsigned char device_id, retry = 0;
798 unsigned char bitcount = sizeof(unsigned char) * 8;
799
800 for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
801 if (!(mhba->target_map[device_id / bitcount] &
802 (1 << (device_id % bitcount))))
803 continue;
804 get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0);
805 if (!cmd) {
806 if (retry++ >= 5) {
807 dev_err(&mhba->pdev->dev, "failed to get memory"
808 " for internal flush cache cmd for "
809 "device %d", device_id);
810 retry = 0;
811 continue;
812 } else
813 goto get_cmd;
814 }
815 cmd->scmd = NULL;
816 cmd->cmd_status = REQ_STATUS_PENDING;
817 atomic_set(&cmd->sync_cmd, 0);
818 frame = cmd->frame;
819 frame->req_function = CL_FUN_SCSI_CMD;
820 frame->device_id = device_id;
821 frame->cmd_flag = CMD_FLAG_NON_DATA;
822 frame->data_transfer_length = 0;
823 frame->cdb_length = MAX_COMMAND_SIZE;
824 memset(frame->cdb, 0, MAX_COMMAND_SIZE);
825 frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
826 frame->cdb[1] = CDB_CORE_MODULE;
827 frame->cdb[2] = CDB_CORE_SHUTDOWN;
828
829 mvumi_issue_blocked_cmd(mhba, cmd);
830 if (cmd->cmd_status != SAM_STAT_GOOD) {
831 dev_err(&mhba->pdev->dev,
832 "device %d flush cache failed, status=0x%x.\n",
833 device_id, cmd->cmd_status);
834 }
835
836 mvumi_delete_internal_cmd(mhba, cmd);
837 }
838 return 0;
839 }
840
841 static unsigned char
842 mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
843 unsigned short len)
844 {
845 unsigned char *ptr;
846 unsigned char ret = 0, i;
847
848 ptr = (unsigned char *) p_header->frame_content;
849 for (i = 0; i < len; i++) {
850 ret ^= *ptr;
851 ptr++;
852 }
853
854 return ret;
855 }
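The handshake pages are sealed by a byte-wise XOR over the frame body: mvumi_hs_build_page() below stores the value, and mvumi_hs_process_page() recomputes and compares it. A minimal, self-contained user-space sketch of the same scheme; the buffer contents are arbitrary and do not come from the driver:

#include <stdio.h>

/* XOR-fold a byte buffer; mirrors mvumi_calculate_checksum(), but over a
 * plain array instead of struct mvumi_hs_header::frame_content. */
static unsigned char xor_checksum(const unsigned char *buf, unsigned short len)
{
	unsigned char ret = 0;

	while (len--)
		ret ^= *buf++;
	return ret;
}

int main(void)
{
	unsigned char page[] = { 0x11, 0x22, 0x33 };	/* arbitrary payload */
	unsigned char sum = xor_checksum(page, sizeof(page));

	/* A receiver recomputes the value and compares it with the stored
	 * checksum field, as mvumi_hs_process_page() does. */
	printf("checksum = 0x%02x\n", sum);	/* 0x11 ^ 0x22 ^ 0x33 = 0x00 */
	return 0;
}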
856
857 static void mvumi_hs_build_page(struct mvumi_hba *mhba,
858 struct mvumi_hs_header *hs_header)
859 {
860 struct mvumi_hs_page2 *hs_page2;
861 struct mvumi_hs_page4 *hs_page4;
862 struct mvumi_hs_page3 *hs_page3;
863 u64 time;
864 u64 local_time;
865
866 switch (hs_header->page_code) {
867 case HS_PAGE_HOST_INFO:
868 hs_page2 = (struct mvumi_hs_page2 *) hs_header;
869 hs_header->frame_length = sizeof(*hs_page2) - 4;
870 memset(hs_header->frame_content, 0, hs_header->frame_length);
871 hs_page2->host_type = 3; /* 3 means Linux */
872 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
873 hs_page2->host_cap = 0x08;/* host dynamic source mode */
874 hs_page2->host_ver.ver_major = VER_MAJOR;
875 hs_page2->host_ver.ver_minor = VER_MINOR;
876 hs_page2->host_ver.ver_oem = VER_OEM;
877 hs_page2->host_ver.ver_build = VER_BUILD;
878 hs_page2->system_io_bus = 0;
879 hs_page2->slot_number = 0;
880 hs_page2->intr_level = 0;
881 hs_page2->intr_vector = 0;
882 time = ktime_get_real_seconds();
883 local_time = (time - (sys_tz.tz_minuteswest * 60));
884 hs_page2->seconds_since1970 = local_time;
885 hs_header->checksum = mvumi_calculate_checksum(hs_header,
886 hs_header->frame_length);
887 break;
888
889 case HS_PAGE_FIRM_CTL:
890 hs_page3 = (struct mvumi_hs_page3 *) hs_header;
891 hs_header->frame_length = sizeof(*hs_page3) - 4;
892 memset(hs_header->frame_content, 0, hs_header->frame_length);
893 hs_header->checksum = mvumi_calculate_checksum(hs_header,
894 hs_header->frame_length);
895 break;
896
897 case HS_PAGE_CL_INFO:
898 hs_page4 = (struct mvumi_hs_page4 *) hs_header;
899 hs_header->frame_length = sizeof(*hs_page4) - 4;
900 memset(hs_header->frame_content, 0, hs_header->frame_length);
901 hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
902 hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
903
904 hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
905 hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
906 hs_page4->ib_entry_size = mhba->ib_max_size_setting;
907 hs_page4->ob_entry_size = mhba->ob_max_size_setting;
908 if (mhba->hba_capability
909 & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
910 hs_page4->ob_depth = find_first_bit((unsigned long *)
911 &mhba->list_num_io,
912 BITS_PER_LONG);
913 hs_page4->ib_depth = find_first_bit((unsigned long *)
914 &mhba->list_num_io,
915 BITS_PER_LONG);
916 } else {
917 hs_page4->ob_depth = (u8) mhba->list_num_io;
918 hs_page4->ib_depth = (u8) mhba->list_num_io;
919 }
920 hs_header->checksum = mvumi_calculate_checksum(hs_header,
921 hs_header->frame_length);
922 break;
923
924 default:
925 dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
926 hs_header->page_code);
927 break;
928 }
929 }
930
931 /**
932 * mvumi_init_data - Initialize requested data for FW
933 * @mhba: Adapter soft state
934 */
935 static int mvumi_init_data(struct mvumi_hba *mhba)
936 {
937 struct mvumi_ob_data *ob_pool;
938 struct mvumi_res *res_mgnt;
939 unsigned int tmp_size, offset, i;
940 void *virmem, *v;
941 dma_addr_t p;
942
943 if (mhba->fw_flag & MVUMI_FW_ALLOC)
944 return 0;
945
946 tmp_size = mhba->ib_max_size * mhba->max_io;
947 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
948 tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
949
950 tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
951 tmp_size += 8 + sizeof(u32)*2 + 16;
952
953 res_mgnt = mvumi_alloc_mem_resource(mhba,
954 RESOURCE_UNCACHED_MEMORY, tmp_size);
955 if (!res_mgnt) {
956 dev_err(&mhba->pdev->dev,
957 "failed to allocate memory for inbound list\n");
958 goto fail_alloc_dma_buf;
959 }
960
961 p = res_mgnt->bus_addr;
962 v = res_mgnt->virt_addr;
963 /* ib_list */
964 offset = round_up(p, 128) - p;
965 p += offset;
966 v += offset;
967 mhba->ib_list = v;
968 mhba->ib_list_phys = p;
969 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
970 v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
971 p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
972 mhba->ib_frame = v;
973 mhba->ib_frame_phys = p;
974 }
975 v += mhba->ib_max_size * mhba->max_io;
976 p += mhba->ib_max_size * mhba->max_io;
977
978 /* ib shadow */
979 offset = round_up(p, 8) - p;
980 p += offset;
981 v += offset;
982 mhba->ib_shadow = v;
983 mhba->ib_shadow_phys = p;
984 p += sizeof(u32)*2;
985 v += sizeof(u32)*2;
986 /* ob shadow */
987 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
988 offset = round_up(p, 8) - p;
989 p += offset;
990 v += offset;
991 mhba->ob_shadow = v;
992 mhba->ob_shadow_phys = p;
993 p += 8;
994 v += 8;
995 } else {
996 offset = round_up(p, 4) - p;
997 p += offset;
998 v += offset;
999 mhba->ob_shadow = v;
1000 mhba->ob_shadow_phys = p;
1001 p += 4;
1002 v += 4;
1003 }
1004
1005 /* ob list */
1006 offset = round_up(p, 128) - p;
1007 p += offset;
1008 v += offset;
1009
1010 mhba->ob_list = v;
1011 mhba->ob_list_phys = p;
1012
1013 /* ob data pool */
1014 tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
1015 tmp_size = round_up(tmp_size, 8);
1016
1017 res_mgnt = mvumi_alloc_mem_resource(mhba,
1018 RESOURCE_CACHED_MEMORY, tmp_size);
1019 if (!res_mgnt) {
1020 dev_err(&mhba->pdev->dev,
1021 "failed to allocate memory for outbound data buffer\n");
1022 goto fail_alloc_dma_buf;
1023 }
1024 virmem = res_mgnt->virt_addr;
1025
1026 for (i = mhba->max_io; i != 0; i--) {
1027 ob_pool = (struct mvumi_ob_data *) virmem;
1028 list_add_tail(&ob_pool->list, &mhba->ob_data_list);
1029 virmem += mhba->ob_max_size + sizeof(*ob_pool);
1030 }
1031
1032 tmp_size = sizeof(unsigned short) * mhba->max_io +
1033 sizeof(struct mvumi_cmd *) * mhba->max_io;
1034 tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
1035 (sizeof(unsigned char) * 8);
1036
1037 res_mgnt = mvumi_alloc_mem_resource(mhba,
1038 RESOURCE_CACHED_MEMORY, tmp_size);
1039 if (!res_mgnt) {
1040 dev_err(&mhba->pdev->dev,
1041 "failed to allocate memory for tag and target map\n");
1042 goto fail_alloc_dma_buf;
1043 }
1044
1045 virmem = res_mgnt->virt_addr;
1046 mhba->tag_pool.stack = virmem;
1047 mhba->tag_pool.size = mhba->max_io;
1048 tag_init(&mhba->tag_pool, mhba->max_io);
1049 virmem += sizeof(unsigned short) * mhba->max_io;
1050
1051 mhba->tag_cmd = virmem;
1052 virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
1053
1054 mhba->target_map = virmem;
1055
1056 mhba->fw_flag |= MVUMI_FW_ALLOC;
1057 return 0;
1058
1059 fail_alloc_dma_buf:
1060 mvumi_release_mem_resource(mhba);
1061 return -1;
1062 }
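mvumi_init_data() carves a single uncached DMA allocation into the inbound list, the shadow counters, and the outbound list, aligning each region with round_up() before advancing both the virtual and bus addresses in lockstep. A stand-alone sketch of that carve-and-align pattern, using malloc in place of the driver's DMA resource and made-up region sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Round x up to the next multiple of a (a must be a power of two);
 * the same arithmetic the kernel's round_up() performs. */
#define ROUND_UP(x, a) (((x) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
	unsigned char *base = malloc(4096);
	uintptr_t p = (uintptr_t)base;

	/* Carve three regions out of one backing buffer, mirroring the
	 * ib_list / ib_shadow / ob_list layout; the sizes are arbitrary. */
	uintptr_t ib_list   = ROUND_UP(p, 128);
	uintptr_t ib_shadow = ROUND_UP(ib_list + 1024, 8);
	uintptr_t ob_list   = ROUND_UP(ib_shadow + 2 * sizeof(uint32_t), 128);

	printf("offsets: ib_list=%zu ib_shadow=%zu ob_list=%zu\n",
	       (size_t)(ib_list - p), (size_t)(ib_shadow - p),
	       (size_t)(ob_list - p));
	free(base);
	return 0;
}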
1063
1064 static int mvumi_hs_process_page(struct mvumi_hba *mhba,
1065 struct mvumi_hs_header *hs_header)
1066 {
1067 struct mvumi_hs_page1 *hs_page1;
1068 unsigned char page_checksum;
1069
1070 page_checksum = mvumi_calculate_checksum(hs_header,
1071 hs_header->frame_length);
1072 if (page_checksum != hs_header->checksum) {
1073 dev_err(&mhba->pdev->dev, "checksum error\n");
1074 return -1;
1075 }
1076
1077 switch (hs_header->page_code) {
1078 case HS_PAGE_FIRM_CAP:
1079 hs_page1 = (struct mvumi_hs_page1 *) hs_header;
1080
1081 mhba->max_io = hs_page1->max_io_support;
1082 mhba->list_num_io = hs_page1->cl_inout_list_depth;
1083 mhba->max_transfer_size = hs_page1->max_transfer_size;
1084 mhba->max_target_id = hs_page1->max_devices_support;
1085 mhba->hba_capability = hs_page1->capability;
1086 mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
1087 mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
1088
1089 mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
1090 mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
1091
1092 dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
1093 hs_page1->fw_ver.ver_build);
1094
1095 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
1096 mhba->eot_flag = 22;
1097 else
1098 mhba->eot_flag = 27;
1099 if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
1100 mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
1101 break;
1102 default:
1103 dev_err(&mhba->pdev->dev, "handshake: page code error\n");
1104 return -1;
1105 }
1106 return 0;
1107 }
1108
1109 /**
1110 * mvumi_handshake - Move the FW to READY state
1111 * @mhba: Adapter soft state
1112 *
1113 * During initialization, the FW can potentially be in any one of several
1114 * possible states. If the FW is in the operational or waiting-for-handshake
1115 * states, the driver must take steps to bring it to the ready state.
1116 * Otherwise, it has to wait for the ready state.
1117 */
1118 static int mvumi_handshake(struct mvumi_hba *mhba)
1119 {
1120 unsigned int hs_state, tmp, hs_fun;
1121 struct mvumi_hs_header *hs_header;
1122 struct mvumi_hw_regs *regs = mhba->regs;
1123
1124 if (mhba->fw_state == FW_STATE_STARTING)
1125 hs_state = HS_S_START;
1126 else {
1127 tmp = ioread32(regs->arm_to_pciea_msg0);
1128 hs_state = HS_GET_STATE(tmp);
1129 dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
1130 if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
1131 mhba->fw_state = FW_STATE_STARTING;
1132 return -1;
1133 }
1134 }
1135
1136 hs_fun = 0;
1137 switch (hs_state) {
1138 case HS_S_START:
1139 mhba->fw_state = FW_STATE_HANDSHAKING;
1140 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1141 HS_SET_STATE(hs_fun, HS_S_RESET);
1142 iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
1143 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1144 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1145 break;
1146
1147 case HS_S_RESET:
1148 iowrite32(lower_32_bits(mhba->handshake_page_phys),
1149 regs->pciea_to_arm_msg1);
1150 iowrite32(upper_32_bits(mhba->handshake_page_phys),
1151 regs->arm_to_pciea_msg1);
1152 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1153 HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
1154 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1155 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1156 break;
1157
1158 case HS_S_PAGE_ADDR:
1159 case HS_S_QUERY_PAGE:
1160 case HS_S_SEND_PAGE:
1161 hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
1162 if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
1163 mhba->hba_total_pages =
1164 ((struct mvumi_hs_page1 *) hs_header)->total_pages;
1165
1166 if (mhba->hba_total_pages == 0)
1167 mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1168 }
1169
1170 if (hs_state == HS_S_QUERY_PAGE) {
1171 if (mvumi_hs_process_page(mhba, hs_header)) {
1172 HS_SET_STATE(hs_fun, HS_S_ABORT);
1173 return -1;
1174 }
1175 if (mvumi_init_data(mhba)) {
1176 HS_SET_STATE(hs_fun, HS_S_ABORT);
1177 return -1;
1178 }
1179 } else if (hs_state == HS_S_PAGE_ADDR) {
1180 hs_header->page_code = 0;
1181 mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1182 }
1183
1184 if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
1185 hs_header->page_code++;
1186 if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
1187 mvumi_hs_build_page(mhba, hs_header);
1188 HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
1189 } else
1190 HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
1191 } else
1192 HS_SET_STATE(hs_fun, HS_S_END);
1193
1194 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1195 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1196 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1197 break;
1198
1199 case HS_S_END:
1200 /* Set communication list ISR */
1201 tmp = ioread32(regs->enpointa_mask_reg);
1202 tmp |= regs->int_comaout | regs->int_comaerr;
1203 iowrite32(tmp, regs->enpointa_mask_reg);
1204 iowrite32(mhba->list_num_io, mhba->ib_shadow);
1205 /* Set InBound List Available count shadow */
1206 iowrite32(lower_32_bits(mhba->ib_shadow_phys),
1207 regs->inb_aval_count_basel);
1208 iowrite32(upper_32_bits(mhba->ib_shadow_phys),
1209 regs->inb_aval_count_baseh);
1210
1211 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
1212 /* Set OutBound List Available count shadow */
1213 iowrite32((mhba->list_num_io-1) |
1214 regs->cl_pointer_toggle,
1215 mhba->ob_shadow);
1216 iowrite32(lower_32_bits(mhba->ob_shadow_phys),
1217 regs->outb_copy_basel);
1218 iowrite32(upper_32_bits(mhba->ob_shadow_phys),
1219 regs->outb_copy_baseh);
1220 }
1221
1222 mhba->ib_cur_slot = (mhba->list_num_io - 1) |
1223 regs->cl_pointer_toggle;
1224 mhba->ob_cur_slot = (mhba->list_num_io - 1) |
1225 regs->cl_pointer_toggle;
1226 mhba->fw_state = FW_STATE_STARTED;
1227
1228 break;
1229 default:
1230 dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
1231 hs_state);
1232 return -1;
1233 }
1234 return 0;
1235 }
1236
1237 static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1238 {
1239 unsigned int isr_status;
1240 unsigned long before;
1241
1242 before = jiffies;
1243 mvumi_handshake(mhba);
1244 do {
1245 isr_status = mhba->instancet->read_fw_status_reg(mhba);
1246
1247 if (mhba->fw_state == FW_STATE_STARTED)
1248 return 0;
1249 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1250 dev_err(&mhba->pdev->dev,
1251 "no handshake response at state 0x%x.\n",
1252 mhba->fw_state);
1253 dev_err(&mhba->pdev->dev,
1254 "isr : global=0x%x,status=0x%x.\n",
1255 mhba->global_isr, isr_status);
1256 return -1;
1257 }
1258 rmb();
1259 usleep_range(1000, 2000);
1260 } while (!(isr_status & DRBL_HANDSHAKE_ISR));
1261
1262 return 0;
1263 }
1264
1265 static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1266 {
1267 unsigned int tmp;
1268 unsigned long before;
1269
1270 before = jiffies;
1271 tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1272 while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
1273 if (tmp != HANDSHAKE_READYSTATE)
1274 iowrite32(DRBL_MU_RESET,
1275 mhba->regs->pciea_to_arm_drbl_reg);
1276 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1277 dev_err(&mhba->pdev->dev,
1278 "invalid signature [0x%x].\n", tmp);
1279 return -1;
1280 }
1281 usleep_range(1000, 2000);
1282 rmb();
1283 tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1284 }
1285
1286 mhba->fw_state = FW_STATE_STARTING;
1287 dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
1288 do {
1289 if (mvumi_handshake_event(mhba)) {
1290 dev_err(&mhba->pdev->dev,
1291 "handshake failed at state 0x%x.\n",
1292 mhba->fw_state);
1293 return -1;
1294 }
1295 } while (mhba->fw_state != FW_STATE_STARTED);
1296
1297 dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
1298
1299 return 0;
1300 }
1301
1302 static unsigned char mvumi_start(struct mvumi_hba *mhba)
1303 {
1304 unsigned int tmp;
1305 struct mvumi_hw_regs *regs = mhba->regs;
1306
1307 /* clear Door bell */
1308 tmp = ioread32(regs->arm_to_pciea_drbl_reg);
1309 iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
1310
1311 iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1312 tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
1313 iowrite32(tmp, regs->enpointa_mask_reg);
1314 msleep(100);
1315 if (mvumi_check_handshake(mhba))
1316 return -1;
1317
1318 return 0;
1319 }
1320
1321 /**
1322 * mvumi_complete_cmd - Completes a command
1323 * @mhba: Adapter soft state
1324 * @cmd: Command to be completed
1325 */
1326 static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1327 struct mvumi_rsp_frame *ob_frame)
1328 {
1329 struct scsi_cmnd *scmd = cmd->scmd;
1330
1331 cmd->scmd->SCp.ptr = NULL;
1332 scmd->result = ob_frame->req_status;
1333
1334 switch (ob_frame->req_status) {
1335 case SAM_STAT_GOOD:
1336 scmd->result |= DID_OK << 16;
1337 break;
1338 case SAM_STAT_BUSY:
1339 scmd->result |= DID_BUS_BUSY << 16;
1340 break;
1341 case SAM_STAT_CHECK_CONDITION:
1342 scmd->result |= (DID_OK << 16);
1343 if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
1344 memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
1345 sizeof(struct mvumi_sense_data));
1346 scmd->result |= (DRIVER_SENSE << 24);
1347 }
1348 break;
1349 default:
1350 scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
1351 break;
1352 }
1353
1354 if (scsi_bufflen(scmd)) {
1355 if (scsi_sg_count(scmd)) {
1356 pci_unmap_sg(mhba->pdev,
1357 scsi_sglist(scmd),
1358 scsi_sg_count(scmd),
1359 (int) scmd->sc_data_direction);
1360 } else {
1361 pci_unmap_single(mhba->pdev,
1362 scmd->SCp.dma_handle,
1363 scsi_bufflen(scmd),
1364 (int) scmd->sc_data_direction);
1365
1366 scmd->SCp.dma_handle = 0;
1367 }
1368 }
1369 cmd->scmd->scsi_done(scmd);
1370 mvumi_return_cmd(mhba, cmd);
1371 }
1372
1373 static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1374 struct mvumi_cmd *cmd,
1375 struct mvumi_rsp_frame *ob_frame)
1376 {
1377 if (atomic_read(&cmd->sync_cmd)) {
1378 cmd->cmd_status = ob_frame->req_status;
1379
1380 if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
1381 (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
1382 cmd->data_buf) {
1383 memcpy(cmd->data_buf, ob_frame->payload,
1384 sizeof(struct mvumi_sense_data));
1385 }
1386 atomic_dec(&cmd->sync_cmd);
1387 wake_up(&mhba->int_cmd_wait_q);
1388 }
1389 }
1390
1391 static void mvumi_show_event(struct mvumi_hba *mhba,
1392 struct mvumi_driver_event *ptr)
1393 {
1394 unsigned int i;
1395
1396 dev_warn(&mhba->pdev->dev,
1397 "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
1398 ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
1399 if (ptr->param_count) {
1400 printk(KERN_WARNING "Event param(len 0x%x): ",
1401 ptr->param_count);
1402 for (i = 0; i < ptr->param_count; i++)
1403 printk(KERN_WARNING "0x%x ", ptr->params[i]);
1404
1405 printk(KERN_WARNING "\n");
1406 }
1407
1408 if (ptr->sense_data_length) {
1409 printk(KERN_WARNING "Event sense data(len 0x%x): ",
1410 ptr->sense_data_length);
1411 for (i = 0; i < ptr->sense_data_length; i++)
1412 printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
1413 printk(KERN_WARNING "\n");
1414 }
1415 }
1416
1417 static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
1418 {
1419 struct scsi_device *sdev;
1420 int ret = -1;
1421
1422 if (status == DEVICE_OFFLINE) {
1423 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1424 if (sdev) {
1425 dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
1426 sdev->id, 0);
1427 scsi_remove_device(sdev);
1428 scsi_device_put(sdev);
1429 ret = 0;
1430 } else
1431 dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
1432 devid);
1433 } else if (status == DEVICE_ONLINE) {
1434 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1435 if (!sdev) {
1436 scsi_add_device(mhba->shost, 0, devid, 0);
1437 dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
1438 devid, 0);
1439 ret = 0;
1440 } else {
1441 dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
1442 0, devid, 0);
1443 scsi_device_put(sdev);
1444 }
1445 }
1446 return ret;
1447 }
1448
1449 static u64 mvumi_inquiry(struct mvumi_hba *mhba,
1450 unsigned int id, struct mvumi_cmd *cmd)
1451 {
1452 struct mvumi_msg_frame *frame;
1453 u64 wwid = 0;
1454 int cmd_alloc = 0;
1455 int data_buf_len = 64;
1456
1457 if (!cmd) {
1458 cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
1459 if (cmd)
1460 cmd_alloc = 1;
1461 else
1462 return 0;
1463 } else {
1464 memset(cmd->data_buf, 0, data_buf_len);
1465 }
1466 cmd->scmd = NULL;
1467 cmd->cmd_status = REQ_STATUS_PENDING;
1468 atomic_set(&cmd->sync_cmd, 0);
1469 frame = cmd->frame;
1470 frame->device_id = (u16) id;
1471 frame->cmd_flag = CMD_FLAG_DATA_IN;
1472 frame->req_function = CL_FUN_SCSI_CMD;
1473 frame->cdb_length = 6;
1474 frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
1475 memset(frame->cdb, 0, frame->cdb_length);
1476 frame->cdb[0] = INQUIRY;
1477 frame->cdb[4] = frame->data_transfer_length;
1478
1479 mvumi_issue_blocked_cmd(mhba, cmd);
1480
1481 if (cmd->cmd_status == SAM_STAT_GOOD) {
1482 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1483 wwid = id + 1;
1484 else
1485 memcpy((void *)&wwid,
1486 (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
1487 MVUMI_INQUIRY_UUID_LEN);
1488 dev_dbg(&mhba->pdev->dev,
1489 "inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
1490 } else {
1491 wwid = 0;
1492 }
1493 if (cmd_alloc)
1494 mvumi_delete_internal_cmd(mhba, cmd);
1495
1496 return wwid;
1497 }
1498
1499 static void mvumi_detach_devices(struct mvumi_hba *mhba)
1500 {
1501 struct mvumi_device *mv_dev = NULL , *dev_next;
1502 struct scsi_device *sdev = NULL;
1503
1504 mutex_lock(&mhba->device_lock);
1505
1506 /* detach Hard Disk */
1507 list_for_each_entry_safe(mv_dev, dev_next,
1508 &mhba->shost_dev_list, list) {
1509 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1510 list_del_init(&mv_dev->list);
1511 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1512 mv_dev->id, mv_dev->wwid);
1513 kfree(mv_dev);
1514 }
1515 list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
1516 list_del_init(&mv_dev->list);
1517 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1518 mv_dev->id, mv_dev->wwid);
1519 kfree(mv_dev);
1520 }
1521
1522 /* detach virtual device */
1523 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
1524 sdev = scsi_device_lookup(mhba->shost, 0,
1525 mhba->max_target_id - 1, 0);
1526
1527 if (sdev) {
1528 scsi_remove_device(sdev);
1529 scsi_device_put(sdev);
1530 }
1531
1532 mutex_unlock(&mhba->device_lock);
1533 }
1534
1535 static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
1536 {
1537 struct scsi_device *sdev;
1538
1539 sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
1540 if (sdev) {
1541 scsi_rescan_device(&sdev->sdev_gendev);
1542 scsi_device_put(sdev);
1543 }
1544 }
1545
1546 static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
1547 {
1548 struct mvumi_device *mv_dev = NULL;
1549
1550 list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
1551 if (mv_dev->wwid == wwid) {
1552 if (mv_dev->id != id) {
1553 dev_err(&mhba->pdev->dev,
1554 "%s has same wwid[%llx] ,"
1555 " but different id[%d %d]\n",
1556 __func__, mv_dev->wwid, mv_dev->id, id);
1557 return -1;
1558 } else {
1559 if (mhba->pdev->device ==
1560 PCI_DEVICE_ID_MARVELL_MV9143)
1561 mvumi_rescan_devices(mhba, id);
1562 return 1;
1563 }
1564 }
1565 }
1566 return 0;
1567 }
1568
1569 static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
1570 {
1571 struct mvumi_device *mv_dev = NULL, *dev_next;
1572
1573 list_for_each_entry_safe(mv_dev, dev_next,
1574 &mhba->shost_dev_list, list) {
1575 if (mv_dev->id == id) {
1576 dev_dbg(&mhba->pdev->dev,
1577 "detach device(0:%d:0) wwid(%llx) from HOST\n",
1578 mv_dev->id, mv_dev->wwid);
1579 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1580 list_del_init(&mv_dev->list);
1581 kfree(mv_dev);
1582 }
1583 }
1584 }
1585
1586 static int mvumi_probe_devices(struct mvumi_hba *mhba)
1587 {
1588 int id, maxid;
1589 u64 wwid = 0;
1590 struct mvumi_device *mv_dev = NULL;
1591 struct mvumi_cmd *cmd = NULL;
1592 int found = 0;
1593
1594 cmd = mvumi_create_internal_cmd(mhba, 64);
1595 if (!cmd)
1596 return -1;
1597
1598 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1599 maxid = mhba->max_target_id;
1600 else
1601 maxid = mhba->max_target_id - 1;
1602
1603 for (id = 0; id < maxid; id++) {
1604 wwid = mvumi_inquiry(mhba, id, cmd);
1605 if (!wwid) {
1606 /* device did not respond, remove it */
1607 mvumi_remove_devices(mhba, id);
1608 } else {
1609 /* device responded, add it */
1610 found = mvumi_match_devices(mhba, id, wwid);
1611 if (!found) {
1612 mvumi_remove_devices(mhba, id);
1613 mv_dev = kzalloc(sizeof(struct mvumi_device),
1614 GFP_KERNEL);
1615 if (!mv_dev) {
1616 dev_err(&mhba->pdev->dev,
1617 "%s alloc mv_dev failed\n",
1618 __func__);
1619 continue;
1620 }
1621 mv_dev->id = id;
1622 mv_dev->wwid = wwid;
1623 mv_dev->sdev = NULL;
1624 INIT_LIST_HEAD(&mv_dev->list);
1625 list_add_tail(&mv_dev->list,
1626 &mhba->mhba_dev_list);
1627 dev_dbg(&mhba->pdev->dev,
1628 "probe a new device(0:%d:0)"
1629 " wwid(%llx)\n", id, mv_dev->wwid);
1630 } else if (found == -1)
1631 return -1;
1632 else
1633 continue;
1634 }
1635 }
1636
1637 if (cmd)
1638 mvumi_delete_internal_cmd(mhba, cmd);
1639
1640 return 0;
1641 }
1642
1643 static int mvumi_rescan_bus(void *data)
1644 {
1645 int ret = 0;
1646 struct mvumi_hba *mhba = (struct mvumi_hba *) data;
1647 struct mvumi_device *mv_dev = NULL , *dev_next;
1648
1649 while (!kthread_should_stop()) {
1650
1651 set_current_state(TASK_INTERRUPTIBLE);
1652 if (!atomic_read(&mhba->pnp_count))
1653 schedule();
1654 msleep(1000);
1655 atomic_set(&mhba->pnp_count, 0);
1656 __set_current_state(TASK_RUNNING);
1657
1658 mutex_lock(&mhba->device_lock);
1659 ret = mvumi_probe_devices(mhba);
1660 if (!ret) {
1661 list_for_each_entry_safe(mv_dev, dev_next,
1662 &mhba->mhba_dev_list, list) {
1663 if (mvumi_handle_hotplug(mhba, mv_dev->id,
1664 DEVICE_ONLINE)) {
1665 dev_err(&mhba->pdev->dev,
1666 "%s add device(0:%d:0) failed"
1667 "wwid(%llx) has exist\n",
1668 __func__,
1669 mv_dev->id, mv_dev->wwid);
1670 list_del_init(&mv_dev->list);
1671 kfree(mv_dev);
1672 } else {
1673 list_move_tail(&mv_dev->list,
1674 &mhba->shost_dev_list);
1675 }
1676 }
1677 }
1678 mutex_unlock(&mhba->device_lock);
1679 }
1680 return 0;
1681 }
1682
1683 static void mvumi_proc_msg(struct mvumi_hba *mhba,
1684 struct mvumi_hotplug_event *param)
1685 {
1686 u16 size = param->size;
1687 const unsigned long *ar_bitmap;
1688 const unsigned long *re_bitmap;
1689 int index;
1690
1691 if (mhba->fw_flag & MVUMI_FW_ATTACH) {
1692 index = -1;
1693 ar_bitmap = (const unsigned long *) param->bitmap;
1694 re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
1695
1696 mutex_lock(&mhba->sas_discovery_mutex);
1697 do {
1698 index = find_next_zero_bit(ar_bitmap, size, index + 1);
1699 if (index >= size)
1700 break;
1701 mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
1702 } while (1);
1703
1704 index = -1;
1705 do {
1706 index = find_next_zero_bit(re_bitmap, size, index + 1);
1707 if (index >= size)
1708 break;
1709 mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
1710 } while (1);
1711 mutex_unlock(&mhba->sas_discovery_mutex);
1712 }
1713 }
1714
1715 static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1716 {
1717 if (msg == APICDB1_EVENT_GETEVENT) {
1718 int i, count;
1719 struct mvumi_driver_event *param = NULL;
1720 struct mvumi_event_req *er = buffer;
1721 count = er->count;
1722 if (count > MAX_EVENTS_RETURNED) {
1723 dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
1724 " than max event count[0x%x].\n",
1725 count, MAX_EVENTS_RETURNED);
1726 return;
1727 }
1728 for (i = 0; i < count; i++) {
1729 param = &er->events[i];
1730 mvumi_show_event(mhba, param);
1731 }
1732 } else if (msg == APICDB1_HOST_GETEVENT) {
1733 mvumi_proc_msg(mhba, buffer);
1734 }
1735 }
1736
1737 static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
1738 {
1739 struct mvumi_cmd *cmd;
1740 struct mvumi_msg_frame *frame;
1741
1742 cmd = mvumi_create_internal_cmd(mhba, 512);
1743 if (!cmd)
1744 return -1;
1745 cmd->scmd = NULL;
1746 cmd->cmd_status = REQ_STATUS_PENDING;
1747 atomic_set(&cmd->sync_cmd, 0);
1748 frame = cmd->frame;
1749 frame->device_id = 0;
1750 frame->cmd_flag = CMD_FLAG_DATA_IN;
1751 frame->req_function = CL_FUN_SCSI_CMD;
1752 frame->cdb_length = MAX_COMMAND_SIZE;
1753 frame->data_transfer_length = sizeof(struct mvumi_event_req);
1754 memset(frame->cdb, 0, MAX_COMMAND_SIZE);
1755 frame->cdb[0] = APICDB0_EVENT;
1756 frame->cdb[1] = msg;
1757 mvumi_issue_blocked_cmd(mhba, cmd);
1758
1759 if (cmd->cmd_status != SAM_STAT_GOOD)
1760 dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
1761 cmd->cmd_status);
1762 else
1763 mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
1764
1765 mvumi_delete_internal_cmd(mhba, cmd);
1766 return 0;
1767 }
1768
1769 static void mvumi_scan_events(struct work_struct *work)
1770 {
1771 struct mvumi_events_wq *mu_ev =
1772 container_of(work, struct mvumi_events_wq, work_q);
1773
1774 mvumi_get_event(mu_ev->mhba, mu_ev->event);
1775 kfree(mu_ev);
1776 }
1777
1778 static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
1779 {
1780 struct mvumi_events_wq *mu_ev;
1781
1782 while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
1783 if (isr_status & DRBL_BUS_CHANGE) {
1784 atomic_inc(&mhba->pnp_count);
1785 wake_up_process(mhba->dm_thread);
1786 isr_status &= ~(DRBL_BUS_CHANGE);
1787 continue;
1788 }
1789
1790 mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1791 if (mu_ev) {
1792 INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1793 mu_ev->mhba = mhba;
1794 mu_ev->event = APICDB1_EVENT_GETEVENT;
1795 isr_status &= ~(DRBL_EVENT_NOTIFY);
1796 mu_ev->param = NULL;
1797 schedule_work(&mu_ev->work_q);
1798 }
1799 }
1800 }
1801
1802 static void mvumi_handle_clob(struct mvumi_hba *mhba)
1803 {
1804 struct mvumi_rsp_frame *ob_frame;
1805 struct mvumi_cmd *cmd;
1806 struct mvumi_ob_data *pool;
1807
1808 while (!list_empty(&mhba->free_ob_list)) {
1809 pool = list_first_entry(&mhba->free_ob_list,
1810 struct mvumi_ob_data, list);
1811 list_del_init(&pool->list);
1812 list_add_tail(&pool->list, &mhba->ob_data_list);
1813
1814 ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
1815 cmd = mhba->tag_cmd[ob_frame->tag];
1816
1817 atomic_dec(&mhba->fw_outstanding);
1818 mhba->tag_cmd[ob_frame->tag] = 0;
1819 tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
1820 if (cmd->scmd)
1821 mvumi_complete_cmd(mhba, cmd, ob_frame);
1822 else
1823 mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
1824 }
1825 mhba->instancet->fire_cmd(mhba, NULL);
1826 }
1827
1828 static irqreturn_t mvumi_isr_handler(int irq, void *devp)
1829 {
1830 struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
1831 unsigned long flags;
1832
1833 spin_lock_irqsave(mhba->shost->host_lock, flags);
1834 if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
1835 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1836 return IRQ_NONE;
1837 }
1838
1839 if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
1840 if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
1841 mvumi_launch_events(mhba, mhba->isr_status);
1842 if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1843 dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1844 mvumi_handshake(mhba);
1845 }
1846
1847 }
1848
1849 if (mhba->global_isr & mhba->regs->int_comaout)
1850 mvumi_receive_ob_list_entry(mhba);
1851
1852 mhba->global_isr = 0;
1853 mhba->isr_status = 0;
1854 if (mhba->fw_state == FW_STATE_STARTED)
1855 mvumi_handle_clob(mhba);
1856 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1857 return IRQ_HANDLED;
1858 }
1859
1860 static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1861 struct mvumi_cmd *cmd)
1862 {
1863 void *ib_entry;
1864 struct mvumi_msg_frame *ib_frame;
1865 unsigned int frame_len;
1866
1867 ib_frame = cmd->frame;
1868 if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
1869 dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
1870 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1871 }
1872 if (tag_is_empty(&mhba->tag_pool)) {
1873 dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1874 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1875 }
1876 mvumi_get_ib_list_entry(mhba, &ib_entry);
1877
1878 cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1879 cmd->frame->request_id = mhba->io_seq++;
1880 cmd->request_id = cmd->frame->request_id;
1881 mhba->tag_cmd[cmd->frame->tag] = cmd;
1882 frame_len = sizeof(*ib_frame) - 4 +
1883 ib_frame->sg_counts * sizeof(struct mvumi_sgl);
1884 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
1885 struct mvumi_dyn_list_entry *dle;
1886 dle = ib_entry;
1887 dle->src_low_addr =
1888 cpu_to_le32(lower_32_bits(cmd->frame_phys));
1889 dle->src_high_addr =
1890 cpu_to_le32(upper_32_bits(cmd->frame_phys));
1891 dle->if_length = (frame_len >> 2) & 0xFFF;
1892 } else {
1893 memcpy(ib_entry, ib_frame, frame_len);
1894 }
1895 return MV_QUEUE_COMMAND_RESULT_SENT;
1896 }
1897
1898 static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1899 {
1900 unsigned short num_of_cl_sent = 0;
1901 unsigned int count;
1902 enum mvumi_qc_result result;
1903
1904 if (cmd)
1905 list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1906 count = mhba->instancet->check_ib_list(mhba);
1907 if (list_empty(&mhba->waiting_req_list) || !count)
1908 return;
1909
1910 do {
1911 cmd = list_first_entry(&mhba->waiting_req_list,
1912 struct mvumi_cmd, queue_pointer);
1913 list_del_init(&cmd->queue_pointer);
1914 result = mvumi_send_command(mhba, cmd);
1915 switch (result) {
1916 case MV_QUEUE_COMMAND_RESULT_SENT:
1917 num_of_cl_sent++;
1918 break;
1919 case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
1920 list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
1921 if (num_of_cl_sent > 0)
1922 mvumi_send_ib_list_entry(mhba);
1923
1924 return;
1925 }
1926 } while (!list_empty(&mhba->waiting_req_list) && count--);
1927
1928 if (num_of_cl_sent > 0)
1929 mvumi_send_ib_list_entry(mhba);
1930 }
1931
1932 /**
1933 * mvumi_enable_intr - Enables interrupts
1934 * @mhba: Adapter soft state
1935 */
1936 static void mvumi_enable_intr(struct mvumi_hba *mhba)
1937 {
1938 unsigned int mask;
1939 struct mvumi_hw_regs *regs = mhba->regs;
1940
1941 iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1942 mask = ioread32(regs->enpointa_mask_reg);
1943 mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
1944 iowrite32(mask, regs->enpointa_mask_reg);
1945 }
1946
1947 /**
1948 * mvumi_disable_intr - Disables interrupts
1949 * @mhba: Adapter soft state
1950 */
1951 static void mvumi_disable_intr(struct mvumi_hba *mhba)
1952 {
1953 unsigned int mask;
1954 struct mvumi_hw_regs *regs = mhba->regs;
1955
1956 iowrite32(0, regs->arm_to_pciea_mask_reg);
1957 mask = ioread32(regs->enpointa_mask_reg);
1958 mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
1959 regs->int_comaerr);
1960 iowrite32(mask, regs->enpointa_mask_reg);
1961 }
1962
1963 static int mvumi_clear_intr(void *extend)
1964 {
1965 struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1966 unsigned int status, isr_status = 0, tmp = 0;
1967 struct mvumi_hw_regs *regs = mhba->regs;
1968
1969 status = ioread32(regs->main_int_cause_reg);
1970 if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
1971 return 1;
1972 if (unlikely(status & regs->int_comaerr)) {
1973 tmp = ioread32(regs->outb_isr_cause);
1974 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
1975 if (tmp & regs->clic_out_err) {
1976 iowrite32(tmp & regs->clic_out_err,
1977 regs->outb_isr_cause);
1978 }
1979 } else {
1980 if (tmp & (regs->clic_in_err | regs->clic_out_err))
1981 iowrite32(tmp & (regs->clic_in_err |
1982 regs->clic_out_err),
1983 regs->outb_isr_cause);
1984 }
1985 status ^= mhba->regs->int_comaerr;
1986 /* inbound or outbound parity error, command will timeout */
1987 }
1988 if (status & regs->int_comaout) {
1989 tmp = ioread32(regs->outb_isr_cause);
1990 if (tmp & regs->clic_irq)
1991 iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
1992 }
1993 if (status & regs->int_dl_cpu2pciea) {
1994 isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
1995 if (isr_status)
1996 iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
1997 }
1998
1999 mhba->global_isr = status;
2000 mhba->isr_status = isr_status;
2001
2002 return 0;
2003 }
2004
2005 /**
2006 * mvumi_read_fw_status_reg - returns the current FW status value
2007 * @mhba: Adapter soft state
2008 */
2009 static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
2010 {
2011 unsigned int status;
2012
2013 status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
2014 if (status)
2015 iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
2016 return status;
2017 }
2018
2019 static struct mvumi_instance_template mvumi_instance_9143 = {
2020 .fire_cmd = mvumi_fire_cmd,
2021 .enable_intr = mvumi_enable_intr,
2022 .disable_intr = mvumi_disable_intr,
2023 .clear_intr = mvumi_clear_intr,
2024 .read_fw_status_reg = mvumi_read_fw_status_reg,
2025 .check_ib_list = mvumi_check_ib_list_9143,
2026 .check_ob_list = mvumi_check_ob_list_9143,
2027 .reset_host = mvumi_reset_host_9143,
2028 };
2029
2030 static struct mvumi_instance_template mvumi_instance_9580 = {
2031 .fire_cmd = mvumi_fire_cmd,
2032 .enable_intr = mvumi_enable_intr,
2033 .disable_intr = mvumi_disable_intr,
2034 .clear_intr = mvumi_clear_intr,
2035 .read_fw_status_reg = mvumi_read_fw_status_reg,
2036 .check_ib_list = mvumi_check_ib_list_9580,
2037 .check_ob_list = mvumi_check_ob_list_9580,
2038 .reset_host = mvumi_reset_host_9580,
2039 };
2040
2041 static int mvumi_slave_configure(struct scsi_device *sdev)
2042 {
2043 struct mvumi_hba *mhba;
2044 unsigned char bitcount = sizeof(unsigned char) * 8;
2045
2046 mhba = (struct mvumi_hba *) sdev->host->hostdata;
2047 if (sdev->id >= mhba->max_target_id)
2048 return -EINVAL;
2049
2050 mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
2051 return 0;
2052 }
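mvumi_slave_configure() records each configured target in a byte-array bitmap that mvumi_flush_cache() later tests with the same id / bitcount arithmetic. A self-contained sketch of that set/test pattern; the array size and target ids here are arbitrary:

#include <stdio.h>

#define BITS_PER_BYTE 8

static unsigned char target_map[16];	/* room for 128 target ids */

/* Set the bit for one target id, as mvumi_slave_configure() does. */
static void map_set(unsigned int id)
{
	target_map[id / BITS_PER_BYTE] |= 1u << (id % BITS_PER_BYTE);
}

/* Test a target id, as mvumi_flush_cache() does before issuing the
 * shutdown/flush command. */
static int map_test(unsigned int id)
{
	return !!(target_map[id / BITS_PER_BYTE] & (1u << (id % BITS_PER_BYTE)));
}

int main(void)
{
	map_set(10);
	printf("target 10 present: %d, target 11 present: %d\n",
	       map_test(10), map_test(11));
	return 0;
}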
2053
2054 /**
2055 * mvumi_build_frame - Prepares a direct cdb (DCDB) command
2056 * @mhba: Adapter soft state
2057 * @scmd: SCSI command
2058 * @cmd: Command to be prepared in
2059 *
2060 * This function prepares CDB commands. These are typically pass-through
2061 * commands to the devices.
2062 */
2063 static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
2064 struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
2065 {
2066 struct mvumi_msg_frame *pframe;
2067
2068 cmd->scmd = scmd;
2069 cmd->cmd_status = REQ_STATUS_PENDING;
2070 pframe = cmd->frame;
2071 pframe->device_id = ((unsigned short) scmd->device->id) |
2072 (((unsigned short) scmd->device->lun) << 8);
2073 pframe->cmd_flag = 0;
2074
2075 switch (scmd->sc_data_direction) {
2076 case DMA_NONE:
2077 pframe->cmd_flag |= CMD_FLAG_NON_DATA;
2078 break;
2079 case DMA_FROM_DEVICE:
2080 pframe->cmd_flag |= CMD_FLAG_DATA_IN;
2081 break;
2082 case DMA_TO_DEVICE:
2083 pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
2084 break;
2085 case DMA_BIDIRECTIONAL:
2086 default:
2087 dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
2088 "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
2089 goto error;
2090 }
2091
2092 pframe->cdb_length = scmd->cmd_len;
2093 memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
2094 pframe->req_function = CL_FUN_SCSI_CMD;
2095 if (scsi_bufflen(scmd)) {
2096 if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
2097 &pframe->sg_counts))
2098 goto error;
2099
2100 pframe->data_transfer_length = scsi_bufflen(scmd);
2101 } else {
2102 pframe->sg_counts = 0;
2103 pframe->data_transfer_length = 0;
2104 }
2105 return 0;
2106
2107 error:
2108 scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
2109 SAM_STAT_CHECK_CONDITION;
2110 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
2111 0);
2112 return -1;
2113 }
2114
2115 /**
2116 * mvumi_queue_command - Queue entry point
2117 * @shost: SCSI host to which the command is queued
2118 * @scmd: SCSI command to be queued
2119 */
2120 static int mvumi_queue_command(struct Scsi_Host *shost,
2121 struct scsi_cmnd *scmd)
2122 {
2123 struct mvumi_cmd *cmd;
2124 struct mvumi_hba *mhba;
2125 unsigned long irq_flags;
2126
2127 spin_lock_irqsave(shost->host_lock, irq_flags);
2128 scsi_cmd_get_serial(shost, scmd);
2129
2130 mhba = (struct mvumi_hba *) shost->hostdata;
2131 scmd->result = 0;
2132 cmd = mvumi_get_cmd(mhba);
2133 if (unlikely(!cmd)) {
2134 spin_unlock_irqrestore(shost->host_lock, irq_flags);
2135 return SCSI_MLQUEUE_HOST_BUSY;
2136 }
2137
2138 if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
2139 goto out_return_cmd;
2140
2141 cmd->scmd = scmd;
2142 scmd->SCp.ptr = (char *) cmd;
2143 mhba->instancet->fire_cmd(mhba, cmd);
2144 spin_unlock_irqrestore(shost->host_lock, irq_flags);
2145 return 0;
2146
2147 out_return_cmd:
2148 mvumi_return_cmd(mhba, cmd);
2149 scmd->scsi_done(scmd);
2150 spin_unlock_irqrestore(shost->host_lock, irq_flags);
2151 return 0;
2152 }
2153
2154 static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
2155 {
2156 struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
2157 struct Scsi_Host *host = scmd->device->host;
2158 struct mvumi_hba *mhba = shost_priv(host);
2159 unsigned long flags;
2160
2161 spin_lock_irqsave(mhba->shost->host_lock, flags);
2162
2163 if (mhba->tag_cmd[cmd->frame->tag]) {
2164 mhba->tag_cmd[cmd->frame->tag] = 0;
2165 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
2166 }
2167 if (!list_empty(&cmd->queue_pointer))
2168 list_del_init(&cmd->queue_pointer);
2169 else
2170 atomic_dec(&mhba->fw_outstanding);
2171
2172 scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
2173 scmd->SCp.ptr = NULL;
2174 if (scsi_bufflen(scmd)) {
2175 if (scsi_sg_count(scmd)) {
2176 pci_unmap_sg(mhba->pdev,
2177 scsi_sglist(scmd),
2178 scsi_sg_count(scmd),
2179 (int)scmd->sc_data_direction);
2180 } else {
2181 pci_unmap_single(mhba->pdev,
2182 scmd->SCp.dma_handle,
2183 scsi_bufflen(scmd),
2184 (int)scmd->sc_data_direction);
2185
2186 scmd->SCp.dma_handle = 0;
2187 }
2188 }
2189 mvumi_return_cmd(mhba, cmd);
2190 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
2191
2192 return BLK_EH_NOT_HANDLED;
2193 }
2194
2195 static int
2196 mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2197 sector_t capacity, int geom[])
2198 {
2199 int heads, sectors;
2200 sector_t cylinders;
2201 unsigned long tmp;
2202
2203 heads = 64;
2204 sectors = 32;
2205 tmp = heads * sectors;
2206 cylinders = capacity;
2207 sector_div(cylinders, tmp);
2208
2209 if (capacity >= 0x200000) {
2210 heads = 255;
2211 sectors = 63;
2212 tmp = heads * sectors;
2213 cylinders = capacity;
2214 sector_div(cylinders, tmp);
2215 }
2216 geom[0] = heads;
2217 geom[1] = sectors;
2218 geom[2] = cylinders;
2219
2220 return 0;
2221 }
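mvumi_bios_param() reports a legacy CHS geometry: 64 heads x 32 sectors for small disks, switching to 255 heads x 63 sectors once the capacity reaches 0x200000 blocks (1 GiB at 512 bytes per block), with cylinders = capacity / (heads * sectors). A quick stand-alone check of that translation for an arbitrary sample capacity:

#include <stdio.h>

int main(void)
{
	/* 2 GiB worth of 512-byte blocks; any value works. */
	unsigned long long capacity = 0x400000ULL;
	int heads = 64, sectors = 32;

	if (capacity >= 0x200000ULL) {	/* same threshold as the driver */
		heads = 255;
		sectors = 63;
	}
	printf("C/H/S = %llu/%d/%d\n",
	       capacity / (unsigned long long)(heads * sectors),
	       heads, sectors);
	return 0;
}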
2222
2223 static struct scsi_host_template mvumi_template = {
2224
2225 .module = THIS_MODULE,
2226 .name = "Marvell Storage Controller",
2227 .slave_configure = mvumi_slave_configure,
2228 .queuecommand = mvumi_queue_command,
2229 .eh_timed_out = mvumi_timed_out,
2230 .eh_host_reset_handler = mvumi_host_reset,
2231 .bios_param = mvumi_bios_param,
2232 .this_id = -1,
2233 };
2234
2235 static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2236 {
2237 void *base = NULL;
2238 struct mvumi_hw_regs *regs;
2239
2240 switch (mhba->pdev->device) {
2241 case PCI_DEVICE_ID_MARVELL_MV9143:
2242 mhba->mmio = mhba->base_addr[0];
2243 base = mhba->mmio;
2244 if (!mhba->regs) {
2245 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2246 if (mhba->regs == NULL)
2247 return -ENOMEM;
2248 }
2249 regs = mhba->regs;
2250
2251 /* For Arm */
2252 regs->ctrl_sts_reg = base + 0x20104;
2253 regs->rstoutn_mask_reg = base + 0x20108;
2254 regs->sys_soft_rst_reg = base + 0x2010C;
2255 regs->main_int_cause_reg = base + 0x20200;
2256 regs->enpointa_mask_reg = base + 0x2020C;
2257 regs->rstoutn_en_reg = base + 0xF1400;
2258 /* For Doorbell */
2259 regs->pciea_to_arm_drbl_reg = base + 0x20400;
2260 regs->arm_to_pciea_drbl_reg = base + 0x20408;
2261 regs->arm_to_pciea_mask_reg = base + 0x2040C;
2262 regs->pciea_to_arm_msg0 = base + 0x20430;
2263 regs->pciea_to_arm_msg1 = base + 0x20434;
2264 regs->arm_to_pciea_msg0 = base + 0x20438;
2265 regs->arm_to_pciea_msg1 = base + 0x2043C;
2266
2267 /* For Message Unit */
2268
2269 regs->inb_aval_count_basel = base + 0x508;
2270 regs->inb_aval_count_baseh = base + 0x50C;
2271 regs->inb_write_pointer = base + 0x518;
2272 regs->inb_read_pointer = base + 0x51C;
2273 regs->outb_coal_cfg = base + 0x568;
2274 regs->outb_copy_basel = base + 0x5B0;
2275 regs->outb_copy_baseh = base + 0x5B4;
2276 regs->outb_copy_pointer = base + 0x544;
2277 regs->outb_read_pointer = base + 0x548;
2278 regs->outb_isr_cause = base + 0x560;
2279 regs->outb_coal_cfg = base + 0x568;
2280 /* Bit setting for HW */
2281 regs->int_comaout = 1 << 8;
2282 regs->int_comaerr = 1 << 6;
2283 regs->int_dl_cpu2pciea = 1 << 1;
2284 regs->cl_pointer_toggle = 1 << 12;
2285 regs->clic_irq = 1 << 1;
2286 regs->clic_in_err = 1 << 8;
2287 regs->clic_out_err = 1 << 12;
2288 regs->cl_slot_num_mask = 0xFFF;
2289 regs->int_drbl_int_mask = 0x3FFFFFFF;
2290 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
2291 regs->int_comaerr;
2292 break;
2293 case PCI_DEVICE_ID_MARVELL_MV9580:
2294 mhba->mmio = mhba->base_addr[2];
2295 base = mhba->mmio;
2296 if (!mhba->regs) {
2297 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2298 if (mhba->regs == NULL)
2299 return -ENOMEM;
2300 }
2301 regs = mhba->regs;
2302 /* For Arm */
2303 regs->ctrl_sts_reg = base + 0x20104;
2304 regs->rstoutn_mask_reg = base + 0x1010C;
2305 regs->sys_soft_rst_reg = base + 0x10108;
2306 regs->main_int_cause_reg = base + 0x10200;
2307 regs->enpointa_mask_reg = base + 0x1020C;
2308 regs->rstoutn_en_reg = base + 0xF1400;
2309
2310 /* For Doorbell */
2311 regs->pciea_to_arm_drbl_reg = base + 0x10460;
2312 regs->arm_to_pciea_drbl_reg = base + 0x10480;
2313 regs->arm_to_pciea_mask_reg = base + 0x10484;
2314 regs->pciea_to_arm_msg0 = base + 0x10400;
2315 regs->pciea_to_arm_msg1 = base + 0x10404;
2316 regs->arm_to_pciea_msg0 = base + 0x10420;
2317 regs->arm_to_pciea_msg1 = base + 0x10424;
2318
2319 /* For reset*/
2320 regs->reset_request = base + 0x10108;
2321 regs->reset_enable = base + 0x1010c;
2322
2323 /* For Message Unit */
2324 regs->inb_aval_count_basel = base + 0x4008;
2325 regs->inb_aval_count_baseh = base + 0x400C;
2326 regs->inb_write_pointer = base + 0x4018;
2327 regs->inb_read_pointer = base + 0x401C;
2328 regs->outb_copy_basel = base + 0x4058;
2329 regs->outb_copy_baseh = base + 0x405C;
2330 regs->outb_copy_pointer = base + 0x406C;
2331 regs->outb_read_pointer = base + 0x4070;
2332 regs->outb_coal_cfg = base + 0x4080;
2333 regs->outb_isr_cause = base + 0x4088;
2334 /* Bit setting for HW */
2335 regs->int_comaout = 1 << 4;
2336 regs->int_dl_cpu2pciea = 1 << 12;
2337 regs->int_comaerr = 1 << 29;
2338 regs->cl_pointer_toggle = 1 << 14;
2339 regs->cl_slot_num_mask = 0x3FFF;
2340 regs->clic_irq = 1 << 0;
2341 regs->clic_out_err = 1 << 1;
2342 regs->int_drbl_int_mask = 0x3FFFFFFF;
2343 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
2344 break;
2345 default:
2346 return -1;
2347 break;
2348 }
2349
2350 return 0;
2351 }
2352
2353 /**
2354 * mvumi_init_fw - Initializes the FW
2355 * @mhba: Adapter soft state
2356 *
2357 * This is the main function for initializing firmware.
2358 */
2359 static int mvumi_init_fw(struct mvumi_hba *mhba)
2360 {
2361 int ret = 0;
2362
2363 if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
2364 dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
2365 return -EBUSY;
2366 }
2367 ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2368 if (ret)
2369 goto fail_ioremap;
2370
2371 switch (mhba->pdev->device) {
2372 case PCI_DEVICE_ID_MARVELL_MV9143:
2373 mhba->instancet = &mvumi_instance_9143;
2374 mhba->io_seq = 0;
2375 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2376 mhba->request_id_enabled = 1;
2377 break;
2378 case PCI_DEVICE_ID_MARVELL_MV9580:
2379 mhba->instancet = &mvumi_instance_9580;
2380 mhba->io_seq = 0;
2381 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2382 break;
2383 default:
2384 dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
2385 mhba->pdev->device);
2386 mhba->instancet = NULL;
2387 ret = -EINVAL;
2388 goto fail_alloc_mem;
2389 }
2390 dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
2391 mhba->pdev->device);
2392 ret = mvumi_cfg_hw_reg(mhba);
2393 if (ret) {
2394 dev_err(&mhba->pdev->dev,
2395 "failed to allocate memory for reg\n");
2396 ret = -ENOMEM;
2397 goto fail_alloc_mem;
2398 }
2399 mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
2400 &mhba->handshake_page_phys);
2401 if (!mhba->handshake_page) {
2402 dev_err(&mhba->pdev->dev,
2403 "failed to allocate memory for handshake\n");
2404 ret = -ENOMEM;
2405 goto fail_alloc_page;
2406 }
2407
2408 if (mvumi_start(mhba)) {
2409 ret = -EINVAL;
2410 goto fail_ready_state;
2411 }
2412 ret = mvumi_alloc_cmds(mhba);
2413 if (ret)
2414 goto fail_ready_state;
2415
2416 return 0;
2417
2418 fail_ready_state:
2419 mvumi_release_mem_resource(mhba);
2420 pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
2421 mhba->handshake_page, mhba->handshake_page_phys);
2422 fail_alloc_page:
2423 kfree(mhba->regs);
2424 fail_alloc_mem:
2425 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
2426 fail_ioremap:
2427 pci_release_regions(mhba->pdev);
2428
2429 return ret;
2430 }
2431
2432 /**
2433 * mvumi_io_attach - Attaches this driver to SCSI mid-layer
2434 * @mhba: Adapter soft state
2435 */
2436 static int mvumi_io_attach(struct mvumi_hba *mhba)
2437 {
2438 struct Scsi_Host *host = mhba->shost;
2439 struct scsi_device *sdev = NULL;
2440 int ret;
2441 unsigned int max_sg = (mhba->ib_max_size + 4 -
2442 sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
2443
2444 host->irq = mhba->pdev->irq;
2445 host->unique_id = mhba->unique_id;
2446 host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2447 host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
2448 host->max_sectors = mhba->max_transfer_size / 512;
2449 host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2450 host->max_id = mhba->max_target_id;
2451 host->max_cmd_len = MAX_COMMAND_SIZE;
2452
2453 ret = scsi_add_host(host, &mhba->pdev->dev);
2454 if (ret) {
2455 dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
2456 return ret;
2457 }
2458 mhba->fw_flag |= MVUMI_FW_ATTACH;
2459
2460 mutex_lock(&mhba->sas_discovery_mutex);
2461 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2462 ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2463 else
2464 ret = 0;
2465 if (ret) {
2466 dev_err(&mhba->pdev->dev, "add virtual device failed\n");
2467 mutex_unlock(&mhba->sas_discovery_mutex);
2468 goto fail_add_device;
2469 }
2470
2471 mhba->dm_thread = kthread_create(mvumi_rescan_bus,
2472 mhba, "mvumi_scanthread");
2473 if (IS_ERR(mhba->dm_thread)) {
2474 dev_err(&mhba->pdev->dev,
2475 "failed to create device scan thread\n");
2476 mutex_unlock(&mhba->sas_discovery_mutex);
2477 goto fail_create_thread;
2478 }
2479 atomic_set(&mhba->pnp_count, 1);
2480 wake_up_process(mhba->dm_thread);
2481
2482 mutex_unlock(&mhba->sas_discovery_mutex);
2483 return 0;
2484
2485 fail_create_thread:
2486 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2487 sdev = scsi_device_lookup(mhba->shost, 0,
2488 mhba->max_target_id - 1, 0);
2489 if (sdev) {
2490 scsi_remove_device(sdev);
2491 scsi_device_put(sdev);
2492 }
2493 fail_add_device:
2494 scsi_remove_host(mhba->shost);
2495 return ret;
2496 }
2497
2498 /**
2499 * mvumi_probe_one - PCI hotplug entry point
2500 * @pdev: PCI device structure
2501 * @id: PCI ids of supported hotplugged adapter
2502 */
2503 static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2504 {
2505 struct Scsi_Host *host;
2506 struct mvumi_hba *mhba;
2507 int ret;
2508
2509 dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
2510 pdev->vendor, pdev->device, pdev->subsystem_vendor,
2511 pdev->subsystem_device);
2512
2513 ret = pci_enable_device(pdev);
2514 if (ret)
2515 return ret;
2516
2517 pci_set_master(pdev);
2518
2519 if (IS_DMA64) {
2520 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2521 if (ret) {
2522 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2523 if (ret)
2524 goto fail_set_dma_mask;
2525 }
2526 } else {
2527 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2528 if (ret)
2529 goto fail_set_dma_mask;
2530 }
2531
2532 host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
2533 if (!host) {
2534 dev_err(&pdev->dev, "scsi_host_alloc failed\n");
2535 ret = -ENOMEM;
2536 goto fail_alloc_instance;
2537 }
2538 mhba = shost_priv(host);
2539
2540 INIT_LIST_HEAD(&mhba->cmd_pool);
2541 INIT_LIST_HEAD(&mhba->ob_data_list);
2542 INIT_LIST_HEAD(&mhba->free_ob_list);
2543 INIT_LIST_HEAD(&mhba->res_list);
2544 INIT_LIST_HEAD(&mhba->waiting_req_list);
2545 mutex_init(&mhba->device_lock);
2546 INIT_LIST_HEAD(&mhba->mhba_dev_list);
2547 INIT_LIST_HEAD(&mhba->shost_dev_list);
2548 atomic_set(&mhba->fw_outstanding, 0);
2549 init_waitqueue_head(&mhba->int_cmd_wait_q);
2550 mutex_init(&mhba->sas_discovery_mutex);
2551
2552 mhba->pdev = pdev;
2553 mhba->shost = host;
2554 mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
2555
2556 ret = mvumi_init_fw(mhba);
2557 if (ret)
2558 goto fail_init_fw;
2559
2560 ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2561 "mvumi", mhba);
2562 if (ret) {
2563 dev_err(&pdev->dev, "failed to register IRQ\n");
2564 goto fail_init_irq;
2565 }
2566
2567 mhba->instancet->enable_intr(mhba);
2568 pci_set_drvdata(pdev, mhba);
2569
2570 ret = mvumi_io_attach(mhba);
2571 if (ret)
2572 goto fail_io_attach;
2573
2574 mvumi_backup_bar_addr(mhba);
2575 dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
2576
2577 return 0;
2578
2579 fail_io_attach:
2580 mhba->instancet->disable_intr(mhba);
2581 free_irq(mhba->pdev->irq, mhba);
2582 fail_init_irq:
2583 mvumi_release_fw(mhba);
2584 fail_init_fw:
2585 scsi_host_put(host);
2586
2587 fail_alloc_instance:
2588 fail_set_dma_mask:
2589 pci_disable_device(pdev);
2590
2591 return ret;
2592 }
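/*
 * The error labels above unwind in the reverse order of the setup performed
 * earlier in the function: a failed mvumi_io_attach() disables interrupts and
 * frees the IRQ, a failed request_irq() releases the firmware resources, a
 * failed mvumi_init_fw() drops the Scsi_Host reference, and every failure
 * path ends by disabling the PCI device.
 */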
2593
2594 static void mvumi_detach_one(struct pci_dev *pdev)
2595 {
2596 struct Scsi_Host *host;
2597 struct mvumi_hba *mhba;
2598
2599 mhba = pci_get_drvdata(pdev);
2600 if (mhba->dm_thread) {
2601 kthread_stop(mhba->dm_thread);
2602 mhba->dm_thread = NULL;
2603 }
2604
2605 mvumi_detach_devices(mhba);
2606 host = mhba->shost;
2607 scsi_remove_host(mhba->shost);
2608 mvumi_flush_cache(mhba);
2609
2610 mhba->instancet->disable_intr(mhba);
2611 free_irq(mhba->pdev->irq, mhba);
2612 mvumi_release_fw(mhba);
2613 scsi_host_put(host);
2614 pci_disable_device(pdev);
2615 dev_dbg(&pdev->dev, "driver is removed!\n");
2616 }
2617
2618 /**
2619 * mvumi_shutdown - Shutdown entry point
2620 * @pdev: PCI device structure
2621 */
2622 static void mvumi_shutdown(struct pci_dev *pdev)
2623 {
2624 struct mvumi_hba *mhba = pci_get_drvdata(pdev);
2625
2626 mvumi_flush_cache(mhba);
2627 }
2628
2629 static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
2630 {
2631 struct mvumi_hba *mhba = NULL;
2632
2633 mhba = pci_get_drvdata(pdev);
2634 mvumi_flush_cache(mhba);
2635
2636 pci_set_drvdata(pdev, mhba);
2637 mhba->instancet->disable_intr(mhba);
2638 free_irq(mhba->pdev->irq, mhba);
2639 mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2640 pci_release_regions(pdev);
2641 pci_save_state(pdev);
2642 pci_disable_device(pdev);
2643 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2644
2645 return 0;
2646 }
2647
2648 static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
2649 {
2650 int ret;
2651 struct mvumi_hba *mhba = NULL;
2652
2653 mhba = pci_get_drvdata(pdev);
2654
2655 pci_set_power_state(pdev, PCI_D0);
2656 pci_enable_wake(pdev, PCI_D0, 0);
2657 pci_restore_state(pdev);
2658
2659 ret = pci_enable_device(pdev);
2660 if (ret) {
2661 dev_err(&pdev->dev, "enable device failed\n");
2662 return ret;
2663 }
2664 pci_set_master(pdev);
2665 if (IS_DMA64) {
2666 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2667 if (ret) {
2668 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2669 if (ret)
2670 goto fail;
2671 }
2672 } else {
2673 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2674 if (ret)
2675 goto fail;
2676 }
2677 ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
2678 if (ret)
2679 goto fail;
2680 ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2681 if (ret)
2682 goto release_regions;
2683
2684 if (mvumi_cfg_hw_reg(mhba)) {
2685 ret = -EINVAL;
2686 goto unmap_pci_addr;
2687 }
2688
2689 mhba->mmio = mhba->base_addr[0];
2690 mvumi_reset(mhba);
2691
2692 if (mvumi_start(mhba)) {
2693 ret = -EINVAL;
2694 goto unmap_pci_addr;
2695 }
2696
2697 ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2698 "mvumi", mhba);
2699 if (ret) {
2700 dev_err(&pdev->dev, "failed to register IRQ\n");
2701 goto unmap_pci_addr;
2702 }
2703 mhba->instancet->enable_intr(mhba);
2704
2705 return 0;
2706
2707 unmap_pci_addr:
2708 mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2709 release_regions:
2710 pci_release_regions(pdev);
2711 fail:
2712 pci_disable_device(pdev);
2713
2714 return ret;
2715 }
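/*
 * Resume effectively repeats the hardware side of probe: it restores PCI
 * state, re-applies the DMA mask, re-requests and re-maps the BARs, runs
 * mvumi_cfg_hw_reg(), resets and restarts the firmware with mvumi_start(),
 * and finally re-installs the shared IRQ handler before enabling interrupts.
 */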
2716
2717 static struct pci_driver mvumi_pci_driver = {
2718
2719 .name = MV_DRIVER_NAME,
2720 .id_table = mvumi_pci_table,
2721 .probe = mvumi_probe_one,
2722 .remove = mvumi_detach_one,
2723 .shutdown = mvumi_shutdown,
2724 #ifdef CONFIG_PM
2725 .suspend = mvumi_suspend,
2726 .resume = mvumi_resume,
2727 #endif
2728 };
2729
2730 /**
2731 * mvumi_init - Driver load entry point
2732 */
2733 static int __init mvumi_init(void)
2734 {
2735 return pci_register_driver(&mvumi_pci_driver);
2736 }
2737
2738 /**
2739 * mvumi_exit - Driver unload entry point
2740 */
2741 static void __exit mvumi_exit(void)
2742 {
2743
2744 pci_unregister_driver(&mvumi_pci_driver);
2745 }
2746
2747 module_init(mvumi_init);
2748 module_exit(mvumi_exit);
2749
2750
2751
2752
2753
2754 /* LDV_COMMENT_BEGIN_MAIN */
2755 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
2756
2757 /*###########################################################################*/
2758
2759 /*############## Driver Environment Generator 0.2 output ####################*/
2760
2761 /*###########################################################################*/
2762
2763
2764
2765 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before it is unloaded. */
2766 void ldv_check_final_state(void);
2767
2768 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests for a correct return result. */
2769 void ldv_check_return_value(int res);
2770
2771 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests for a correct return result of the probe() function. */
2772 void ldv_check_return_value_probe(int res);
2773
2774 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
2775 void ldv_initialize(void);
2776
2777 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
2778 void ldv_handler_precall(void);
2779
2780 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
2781 int nondet_int(void);
2782
2783 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
2784 int LDV_IN_INTERRUPT;
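/* LDV_IN_INTERRUPT models the execution context for the environment model
 * below: it stays at 1 for process context and is set to 2 only around the
 * call to the request_irq callback mvumi_isr_handler (case 25), then
 * restored to 1 afterwards. */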
2785
2786 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
2787 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
2788
2789
2790
2791 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
2792 /*============================= VARIABLE DECLARATION PART =============================*/
2793 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
2794 /* content: static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)*/
2795 /* LDV_COMMENT_END_PREP */
2796 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_fire_cmd" */
2797 struct mvumi_hba * var_group1;
2798 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_fire_cmd" */
2799 struct mvumi_cmd * var_group2;
2800 /* LDV_COMMENT_BEGIN_PREP */
2801 #ifdef CONFIG_PM
2802 #endif
2803 /* LDV_COMMENT_END_PREP */
2804 /* content: static void mvumi_enable_intr(struct mvumi_hba *mhba)*/
2805 /* LDV_COMMENT_END_PREP */
2806 /* LDV_COMMENT_BEGIN_PREP */
2807 #ifdef CONFIG_PM
2808 #endif
2809 /* LDV_COMMENT_END_PREP */
2810 /* content: static void mvumi_disable_intr(struct mvumi_hba *mhba)*/
2811 /* LDV_COMMENT_END_PREP */
2812 /* LDV_COMMENT_BEGIN_PREP */
2813 #ifdef CONFIG_PM
2814 #endif
2815 /* LDV_COMMENT_END_PREP */
2816 /* content: static int mvumi_clear_intr(void *extend)*/
2817 /* LDV_COMMENT_END_PREP */
2818 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_clear_intr" */
2819 void * var_mvumi_clear_intr_66_p0;
2820 /* LDV_COMMENT_BEGIN_PREP */
2821 #ifdef CONFIG_PM
2822 #endif
2823 /* LDV_COMMENT_END_PREP */
2824 /* content: static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)*/
2825 /* LDV_COMMENT_END_PREP */
2826 /* LDV_COMMENT_BEGIN_PREP */
2827 #ifdef CONFIG_PM
2828 #endif
2829 /* LDV_COMMENT_END_PREP */
2830 /* content: static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)*/
2831 /* LDV_COMMENT_END_PREP */
2832 /* LDV_COMMENT_BEGIN_PREP */
2833 #ifdef CONFIG_PM
2834 #endif
2835 /* LDV_COMMENT_END_PREP */
2836 /* content: static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba, unsigned int *cur_obf, unsigned int *assign_obf_end)*/
2837 /* LDV_COMMENT_END_PREP */
2838 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_check_ob_list_9143" */
2839 unsigned int * var_mvumi_check_ob_list_9143_21_p1;
2840 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_check_ob_list_9143" */
2841 unsigned int * var_mvumi_check_ob_list_9143_21_p2;
2842 /* LDV_COMMENT_BEGIN_PREP */
2843 #ifdef CONFIG_PM
2844 #endif
2845 /* LDV_COMMENT_END_PREP */
2846 /* content: static int mvumi_reset_host_9143(struct mvumi_hba *mhba)*/
2847 /* LDV_COMMENT_END_PREP */
2848 /* LDV_COMMENT_BEGIN_PREP */
2849 #ifdef CONFIG_PM
2850 #endif
2851 /* LDV_COMMENT_END_PREP */
2852
2853 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
2854 /* content: static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)*/
2855 /* LDV_COMMENT_END_PREP */
2856 /* LDV_COMMENT_BEGIN_PREP */
2857 #ifdef CONFIG_PM
2858 #endif
2859 /* LDV_COMMENT_END_PREP */
2860 /* content: static void mvumi_enable_intr(struct mvumi_hba *mhba)*/
2861 /* LDV_COMMENT_END_PREP */
2862 /* LDV_COMMENT_BEGIN_PREP */
2863 #ifdef CONFIG_PM
2864 #endif
2865 /* LDV_COMMENT_END_PREP */
2866 /* content: static void mvumi_disable_intr(struct mvumi_hba *mhba)*/
2867 /* LDV_COMMENT_END_PREP */
2868 /* LDV_COMMENT_BEGIN_PREP */
2869 #ifdef CONFIG_PM
2870 #endif
2871 /* LDV_COMMENT_END_PREP */
2872 /* content: static int mvumi_clear_intr(void *extend)*/
2873 /* LDV_COMMENT_END_PREP */
2874 /* LDV_COMMENT_BEGIN_PREP */
2875 #ifdef CONFIG_PM
2876 #endif
2877 /* LDV_COMMENT_END_PREP */
2878 /* content: static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)*/
2879 /* LDV_COMMENT_END_PREP */
2880 /* LDV_COMMENT_BEGIN_PREP */
2881 #ifdef CONFIG_PM
2882 #endif
2883 /* LDV_COMMENT_END_PREP */
2884 /* content: static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)*/
2885 /* LDV_COMMENT_END_PREP */
2886 /* LDV_COMMENT_BEGIN_PREP */
2887 #ifdef CONFIG_PM
2888 #endif
2889 /* LDV_COMMENT_END_PREP */
2890 /* content: static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba, unsigned int *cur_obf, unsigned int *assign_obf_end)*/
2891 /* LDV_COMMENT_END_PREP */
2892 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_check_ob_list_9580" */
2893 unsigned int * var_mvumi_check_ob_list_9580_22_p1;
2894 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_check_ob_list_9580" */
2895 unsigned int * var_mvumi_check_ob_list_9580_22_p2;
2896 /* LDV_COMMENT_BEGIN_PREP */
2897 #ifdef CONFIG_PM
2898 #endif
2899 /* LDV_COMMENT_END_PREP */
2900 /* content: static int mvumi_reset_host_9580(struct mvumi_hba *mhba)*/
2901 /* LDV_COMMENT_END_PREP */
2902 /* LDV_COMMENT_BEGIN_PREP */
2903 #ifdef CONFIG_PM
2904 #endif
2905 /* LDV_COMMENT_END_PREP */
2906
2907 /** STRUCT: struct type: scsi_host_template, struct name: mvumi_template **/
2908 /* content: static int mvumi_slave_configure(struct scsi_device *sdev)*/
2909 /* LDV_COMMENT_END_PREP */
2910 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_slave_configure" */
2911 struct scsi_device * var_group3;
2912 /* LDV_COMMENT_BEGIN_PREP */
2913 #ifdef CONFIG_PM
2914 #endif
2915 /* LDV_COMMENT_END_PREP */
2916 /* content: static int mvumi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)*/
2917 /* LDV_COMMENT_END_PREP */
2918 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_queue_command" */
2919 struct Scsi_Host * var_group4;
2920 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_queue_command" */
2921 struct scsi_cmnd * var_group5;
2922 /* LDV_COMMENT_BEGIN_PREP */
2923 #ifdef CONFIG_PM
2924 #endif
2925 /* LDV_COMMENT_END_PREP */
2926 /* content: static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)*/
2927 /* LDV_COMMENT_END_PREP */
2928 /* LDV_COMMENT_BEGIN_PREP */
2929 #ifdef CONFIG_PM
2930 #endif
2931 /* LDV_COMMENT_END_PREP */
2932 /* content: static int mvumi_host_reset(struct scsi_cmnd *scmd)*/
2933 /* LDV_COMMENT_END_PREP */
2934 /* LDV_COMMENT_BEGIN_PREP */
2935 #ifdef CONFIG_PM
2936 #endif
2937 /* LDV_COMMENT_END_PREP */
2938
2939 /** STRUCT: struct type: pci_driver, struct name: mvumi_pci_driver **/
2940 /* content: static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)*/
2941 /* LDV_COMMENT_END_PREP */
2942 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_probe_one" */
2943 struct pci_dev * var_group6;
2944 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_probe_one" */
2945 const struct pci_device_id * var_mvumi_probe_one_75_p1;
2946 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mvumi_probe_one" */
2947 static int res_mvumi_probe_one_75;
2948 /* LDV_COMMENT_BEGIN_PREP */
2949 #ifdef CONFIG_PM
2950 #endif
2951 /* LDV_COMMENT_END_PREP */
2952 /* content: static void mvumi_detach_one(struct pci_dev *pdev)*/
2953 /* LDV_COMMENT_END_PREP */
2954 /* LDV_COMMENT_BEGIN_PREP */
2955 #ifdef CONFIG_PM
2956 #endif
2957 /* LDV_COMMENT_END_PREP */
2958 /* content: static void mvumi_shutdown(struct pci_dev *pdev)*/
2959 /* LDV_COMMENT_END_PREP */
2960 /* LDV_COMMENT_BEGIN_PREP */
2961 #ifdef CONFIG_PM
2962 #endif
2963 /* LDV_COMMENT_END_PREP */
2964 /* content: static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)*/
2965 /* LDV_COMMENT_END_PREP */
2966 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_suspend" */
2967 pm_message_t var_mvumi_suspend_78_p1;
2968 /* LDV_COMMENT_BEGIN_PREP */
2969 #ifdef CONFIG_PM
2970 #endif
2971 /* LDV_COMMENT_END_PREP */
2972 /* content: static int __maybe_unused mvumi_resume(struct pci_dev *pdev)*/
2973 /* LDV_COMMENT_END_PREP */
2974 /* LDV_COMMENT_BEGIN_PREP */
2975 #ifdef CONFIG_PM
2976 #endif
2977 /* LDV_COMMENT_END_PREP */
2978
2979 /** CALLBACK SECTION request_irq **/
2980 /* content: static irqreturn_t mvumi_isr_handler(int irq, void *devp)*/
2981 /* LDV_COMMENT_END_PREP */
2982 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_isr_handler" */
2983 int var_mvumi_isr_handler_61_p0;
2984 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_isr_handler" */
2985 void * var_mvumi_isr_handler_61_p1;
2986 /* LDV_COMMENT_BEGIN_PREP */
2987 #ifdef CONFIG_PM
2988 #endif
2989 /* LDV_COMMENT_END_PREP */
2990
2991
2992
2993
2994 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
2995 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
2996 /*============================= VARIABLE INITIALIZING PART =============================*/
2997 LDV_IN_INTERRUPT=1;
2998
2999
3000
3001
3002 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
3003 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
3004 /*============================= FUNCTION CALL SECTION =============================*/
3005 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
3006 ldv_initialize();
3007
3008 /** INIT: init_type: ST_MODULE_INIT **/
3009 /* content: static int __init mvumi_init(void)*/
3010 /* LDV_COMMENT_BEGIN_PREP */
3011 #ifdef CONFIG_PM
3012 #endif
3013 /* LDV_COMMENT_END_PREP */
3014 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver init function after the driver is loaded into the kernel. This function is declared as "MODULE_INIT(function name)". */
3015 ldv_handler_precall();
3016 if(mvumi_init())
3017 goto ldv_final;
3018
3019
3020
3021
3022
3023
3024 int ldv_s_mvumi_pci_driver_pci_driver = 0;
3025
3026
3027
3028
3029
3030 while( nondet_int()
3031 || !(ldv_s_mvumi_pci_driver_pci_driver == 0)
3032 ) {
3033
3034 switch(nondet_int()) {
3035
3036 case 0: {
3037
3038 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3039
3040
3041 /* content: static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)*/
3042 /* LDV_COMMENT_END_PREP */
3043 /* LDV_COMMENT_FUNCTION_CALL Function from field "fire_cmd" from driver structure with callbacks "mvumi_instance_9143" */
3044 ldv_handler_precall();
3045 mvumi_fire_cmd( var_group1, var_group2);
3046 /* LDV_COMMENT_BEGIN_PREP */
3047 #ifdef CONFIG_PM
3048 #endif
3049 /* LDV_COMMENT_END_PREP */
3050
3051
3052
3053
3054 }
3055
3056 break;
3057 case 1: {
3058
3059 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3060
3061
3062 /* content: static void mvumi_enable_intr(struct mvumi_hba *mhba)*/
3063 /* LDV_COMMENT_END_PREP */
3064 /* LDV_COMMENT_FUNCTION_CALL Function from field "enable_intr" from driver structure with callbacks "mvumi_instance_9143" */
3065 ldv_handler_precall();
3066 mvumi_enable_intr( var_group1);
3067 /* LDV_COMMENT_BEGIN_PREP */
3068 #ifdef CONFIG_PM
3069 #endif
3070 /* LDV_COMMENT_END_PREP */
3071
3072
3073
3074
3075 }
3076
3077 break;
3078 case 2: {
3079
3080 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3081
3082
3083 /* content: static void mvumi_disable_intr(struct mvumi_hba *mhba)*/
3084 /* LDV_COMMENT_END_PREP */
3085 /* LDV_COMMENT_FUNCTION_CALL Function from field "disable_intr" from driver structure with callbacks "mvumi_instance_9143" */
3086 ldv_handler_precall();
3087 mvumi_disable_intr( var_group1);
3088 /* LDV_COMMENT_BEGIN_PREP */
3089 #ifdef CONFIG_PM
3090 #endif
3091 /* LDV_COMMENT_END_PREP */
3092
3093
3094
3095
3096 }
3097
3098 break;
3099 case 3: {
3100
3101 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3102
3103
3104 /* content: static int mvumi_clear_intr(void *extend)*/
3105 /* LDV_COMMENT_END_PREP */
3106 /* LDV_COMMENT_FUNCTION_CALL Function from field "clear_intr" from driver structure with callbacks "mvumi_instance_9143" */
3107 ldv_handler_precall();
3108 mvumi_clear_intr( var_mvumi_clear_intr_66_p0);
3109 /* LDV_COMMENT_BEGIN_PREP */
3110 #ifdef CONFIG_PM
3111 #endif
3112 /* LDV_COMMENT_END_PREP */
3113
3114
3115
3116
3117 }
3118
3119 break;
3120 case 4: {
3121
3122 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3123
3124
3125 /* content: static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)*/
3126 /* LDV_COMMENT_END_PREP */
3127 /* LDV_COMMENT_FUNCTION_CALL Function from field "read_fw_status_reg" from driver structure with callbacks "mvumi_instance_9143" */
3128 ldv_handler_precall();
3129 mvumi_read_fw_status_reg( var_group1);
3130 /* LDV_COMMENT_BEGIN_PREP */
3131 #ifdef CONFIG_PM
3132 #endif
3133 /* LDV_COMMENT_END_PREP */
3134
3135
3136
3137
3138 }
3139
3140 break;
3141 case 5: {
3142
3143 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3144
3145
3146 /* content: static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)*/
3147 /* LDV_COMMENT_END_PREP */
3148 /* LDV_COMMENT_FUNCTION_CALL Function from field "check_ib_list" from driver structure with callbacks "mvumi_instance_9143" */
3149 ldv_handler_precall();
3150 mvumi_check_ib_list_9143( var_group1);
3151 /* LDV_COMMENT_BEGIN_PREP */
3152 #ifdef CONFIG_PM
3153 #endif
3154 /* LDV_COMMENT_END_PREP */
3155
3156
3157
3158
3159 }
3160
3161 break;
3162 case 6: {
3163
3164 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3165
3166
3167 /* content: static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba, unsigned int *cur_obf, unsigned int *assign_obf_end)*/
3168 /* LDV_COMMENT_END_PREP */
3169 /* LDV_COMMENT_FUNCTION_CALL Function from field "check_ob_list" from driver structure with callbacks "mvumi_instance_9143" */
3170 ldv_handler_precall();
3171 mvumi_check_ob_list_9143( var_group1, var_mvumi_check_ob_list_9143_21_p1, var_mvumi_check_ob_list_9143_21_p2);
3172 /* LDV_COMMENT_BEGIN_PREP */
3173 #ifdef CONFIG_PM
3174 #endif
3175 /* LDV_COMMENT_END_PREP */
3176
3177
3178
3179
3180 }
3181
3182 break;
3183 case 7: {
3184
3185 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3186
3187
3188 /* content: static int mvumi_reset_host_9143(struct mvumi_hba *mhba)*/
3189 /* LDV_COMMENT_END_PREP */
3190 /* LDV_COMMENT_FUNCTION_CALL Function from field "reset_host" from driver structure with callbacks "mvumi_instance_9143" */
3191 ldv_handler_precall();
3192 mvumi_reset_host_9143( var_group1);
3193 /* LDV_COMMENT_BEGIN_PREP */
3194 #ifdef CONFIG_PM
3195 #endif
3196 /* LDV_COMMENT_END_PREP */
3197
3198
3199
3200
3201 }
3202
3203 break;
3204 case 8: {
3205
3206 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3207
3208
3209 /* content: static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)*/
3210 /* LDV_COMMENT_END_PREP */
3211 /* LDV_COMMENT_FUNCTION_CALL Function from field "fire_cmd" from driver structure with callbacks "mvumi_instance_9580" */
3212 ldv_handler_precall();
3213 mvumi_fire_cmd( var_group1, var_group2);
3214 /* LDV_COMMENT_BEGIN_PREP */
3215 #ifdef CONFIG_PM
3216 #endif
3217 /* LDV_COMMENT_END_PREP */
3218
3219
3220
3221
3222 }
3223
3224 break;
3225 case 9: {
3226
3227 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3228
3229
3230 /* content: static void mvumi_enable_intr(struct mvumi_hba *mhba)*/
3231 /* LDV_COMMENT_END_PREP */
3232 /* LDV_COMMENT_FUNCTION_CALL Function from field "enable_intr" from driver structure with callbacks "mvumi_instance_9580" */
3233 ldv_handler_precall();
3234 mvumi_enable_intr( var_group1);
3235 /* LDV_COMMENT_BEGIN_PREP */
3236 #ifdef CONFIG_PM
3237 #endif
3238 /* LDV_COMMENT_END_PREP */
3239
3240
3241
3242
3243 }
3244
3245 break;
3246 case 10: {
3247
3248 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3249
3250
3251 /* content: static void mvumi_disable_intr(struct mvumi_hba *mhba)*/
3252 /* LDV_COMMENT_END_PREP */
3253 /* LDV_COMMENT_FUNCTION_CALL Function from field "disable_intr" from driver structure with callbacks "mvumi_instance_9580" */
3254 ldv_handler_precall();
3255 mvumi_disable_intr( var_group1);
3256 /* LDV_COMMENT_BEGIN_PREP */
3257 #ifdef CONFIG_PM
3258 #endif
3259 /* LDV_COMMENT_END_PREP */
3260
3261
3262
3263
3264 }
3265
3266 break;
3267 case 11: {
3268
3269 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3270
3271
3272 /* content: static int mvumi_clear_intr(void *extend)*/
3273 /* LDV_COMMENT_END_PREP */
3274 /* LDV_COMMENT_FUNCTION_CALL Function from field "clear_intr" from driver structure with callbacks "mvumi_instance_9580" */
3275 ldv_handler_precall();
3276 mvumi_clear_intr( var_mvumi_clear_intr_66_p0);
3277 /* LDV_COMMENT_BEGIN_PREP */
3278 #ifdef CONFIG_PM
3279 #endif
3280 /* LDV_COMMENT_END_PREP */
3281
3282
3283
3284
3285 }
3286
3287 break;
3288 case 12: {
3289
3290 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3291
3292
3293 /* content: static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)*/
3294 /* LDV_COMMENT_END_PREP */
3295 /* LDV_COMMENT_FUNCTION_CALL Function from field "read_fw_status_reg" from driver structure with callbacks "mvumi_instance_9580" */
3296 ldv_handler_precall();
3297 mvumi_read_fw_status_reg( var_group1);
3298 /* LDV_COMMENT_BEGIN_PREP */
3299 #ifdef CONFIG_PM
3300 #endif
3301 /* LDV_COMMENT_END_PREP */
3302
3303
3304
3305
3306 }
3307
3308 break;
3309 case 13: {
3310
3311 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3312
3313
3314 /* content: static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)*/
3315 /* LDV_COMMENT_END_PREP */
3316 /* LDV_COMMENT_FUNCTION_CALL Function from field "check_ib_list" from driver structure with callbacks "mvumi_instance_9580" */
3317 ldv_handler_precall();
3318 mvumi_check_ib_list_9580( var_group1);
3319 /* LDV_COMMENT_BEGIN_PREP */
3320 #ifdef CONFIG_PM
3321 #endif
3322 /* LDV_COMMENT_END_PREP */
3323
3324
3325
3326
3327 }
3328
3329 break;
3330 case 14: {
3331
3332 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3333
3334
3335 /* content: static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba, unsigned int *cur_obf, unsigned int *assign_obf_end)*/
3336 /* LDV_COMMENT_END_PREP */
3337 /* LDV_COMMENT_FUNCTION_CALL Function from field "check_ob_list" from driver structure with callbacks "mvumi_instance_9580" */
3338 ldv_handler_precall();
3339 mvumi_check_ob_list_9580( var_group1, var_mvumi_check_ob_list_9580_22_p1, var_mvumi_check_ob_list_9580_22_p2);
3340 /* LDV_COMMENT_BEGIN_PREP */
3341 #ifdef CONFIG_PM
3342 #endif
3343 /* LDV_COMMENT_END_PREP */
3344
3345
3346
3347
3348 }
3349
3350 break;
3351 case 15: {
3352
3353 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3354
3355
3356 /* content: static int mvumi_reset_host_9580(struct mvumi_hba *mhba)*/
3357 /* LDV_COMMENT_END_PREP */
3358 /* LDV_COMMENT_FUNCTION_CALL Function from field "reset_host" from driver structure with callbacks "mvumi_instance_9580" */
3359 ldv_handler_precall();
3360 mvumi_reset_host_9580( var_group1);
3361 /* LDV_COMMENT_BEGIN_PREP */
3362 #ifdef CONFIG_PM
3363 #endif
3364 /* LDV_COMMENT_END_PREP */
3365
3366
3367
3368
3369 }
3370
3371 break;
3372 case 16: {
3373
3374 /** STRUCT: struct type: scsi_host_template, struct name: mvumi_template **/
3375
3376
3377 /* content: static int mvumi_slave_configure(struct scsi_device *sdev)*/
3378 /* LDV_COMMENT_END_PREP */
3379 /* LDV_COMMENT_FUNCTION_CALL Function from field "slave_configure" from driver structure with callbacks "mvumi_template" */
3380 ldv_handler_precall();
3381 mvumi_slave_configure( var_group3);
3382 /* LDV_COMMENT_BEGIN_PREP */
3383 #ifdef CONFIG_PM
3384 #endif
3385 /* LDV_COMMENT_END_PREP */
3386
3387
3388
3389
3390 }
3391
3392 break;
3393 case 17: {
3394
3395 /** STRUCT: struct type: scsi_host_template, struct name: mvumi_template **/
3396
3397
3398 /* content: static int mvumi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)*/
3399 /* LDV_COMMENT_END_PREP */
3400 /* LDV_COMMENT_FUNCTION_CALL Function from field "queuecommand" from driver structure with callbacks "mvumi_template" */
3401 ldv_handler_precall();
3402 mvumi_queue_command( var_group4, var_group5);
3403 /* LDV_COMMENT_BEGIN_PREP */
3404 #ifdef CONFIG_PM
3405 #endif
3406 /* LDV_COMMENT_END_PREP */
3407
3408
3409
3410
3411 }
3412
3413 break;
3414 case 18: {
3415
3416 /** STRUCT: struct type: scsi_host_template, struct name: mvumi_template **/
3417
3418
3419 /* content: static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)*/
3420 /* LDV_COMMENT_END_PREP */
3421 /* LDV_COMMENT_FUNCTION_CALL Function from field "eh_timed_out" from driver structure with callbacks "mvumi_template" */
3422 ldv_handler_precall();
3423 mvumi_timed_out( var_group5);
3424 /* LDV_COMMENT_BEGIN_PREP */
3425 #ifdef CONFIG_PM
3426 #endif
3427 /* LDV_COMMENT_END_PREP */
3428
3429
3430
3431
3432 }
3433
3434 break;
3435 case 19: {
3436
3437 /** STRUCT: struct type: scsi_host_template, struct name: mvumi_template **/
3438
3439
3440 /* content: static int mvumi_host_reset(struct scsi_cmnd *scmd)*/
3441 /* LDV_COMMENT_END_PREP */
3442 /* LDV_COMMENT_FUNCTION_CALL Function from field "eh_host_reset_handler" from driver structure with callbacks "mvumi_template" */
3443 ldv_handler_precall();
3444 mvumi_host_reset( var_group5);
3445 /* LDV_COMMENT_BEGIN_PREP */
3446 #ifdef CONFIG_PM
3447 #endif
3448 /* LDV_COMMENT_END_PREP */
3449
3450
3451
3452
3453 }
3454
3455 break;
3456 case 20: {
3457
3458 /** STRUCT: struct type: pci_driver, struct name: mvumi_pci_driver **/
3459 if(ldv_s_mvumi_pci_driver_pci_driver==0) {
3460
3461 /* content: static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)*/
3462 /* LDV_COMMENT_END_PREP */
3463 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "mvumi_pci_driver". Standard test for a correct return result. */
3464 res_mvumi_probe_one_75 = mvumi_probe_one( var_group6, var_mvumi_probe_one_75_p1);
3465 ldv_check_return_value(res_mvumi_probe_one_75);
3466 ldv_check_return_value_probe(res_mvumi_probe_one_75);
3467 if(res_mvumi_probe_one_75)
3468 goto ldv_module_exit;
3469 /* LDV_COMMENT_BEGIN_PREP */
3470 #ifdef CONFIG_PM
3471 #endif
3472 /* LDV_COMMENT_END_PREP */
3473 ldv_s_mvumi_pci_driver_pci_driver++;
3474
3475 }
3476
3477 }
3478
3479 break;
3480 case 21: {
3481
3482 /** STRUCT: struct type: pci_driver, struct name: mvumi_pci_driver **/
3483 if(ldv_s_mvumi_pci_driver_pci_driver==1) {
3484
3485 /* content: static void mvumi_detach_one(struct pci_dev *pdev)*/
3486 /* LDV_COMMENT_END_PREP */
3487 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "mvumi_pci_driver" */
3488 ldv_handler_precall();
3489 mvumi_detach_one( var_group6);
3490 /* LDV_COMMENT_BEGIN_PREP */
3491 #ifdef CONFIG_PM
3492 #endif
3493 /* LDV_COMMENT_END_PREP */
3494 ldv_s_mvumi_pci_driver_pci_driver=0;
3495
3496 }
3497
3498 }
3499
3500 break;
3501 case 22: {
3502
3503 /** STRUCT: struct type: pci_driver, struct name: mvumi_pci_driver **/
3504
3505
3506 /* content: static void mvumi_shutdown(struct pci_dev *pdev)*/
3507 /* LDV_COMMENT_END_PREP */
3508 /* LDV_COMMENT_FUNCTION_CALL Function from field "shutdown" from driver structure with callbacks "mvumi_pci_driver" */
3509 ldv_handler_precall();
3510 mvumi_shutdown( var_group6);
3511 /* LDV_COMMENT_BEGIN_PREP */
3512 #ifdef CONFIG_PM
3513 #endif
3514 /* LDV_COMMENT_END_PREP */
3515
3516
3517
3518
3519 }
3520
3521 break;
3522 case 23: {
3523
3524 /** STRUCT: struct type: pci_driver, struct name: mvumi_pci_driver **/
3525
3526
3527 /* content: static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)*/
3528 /* LDV_COMMENT_END_PREP */
3529 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "mvumi_pci_driver" */
3530 ldv_handler_precall();
3531 mvumi_suspend( var_group6, var_mvumi_suspend_78_p1);
3532 /* LDV_COMMENT_BEGIN_PREP */
3533 #ifdef CONFIG_PM
3534 #endif
3535 /* LDV_COMMENT_END_PREP */
3536
3537
3538
3539
3540 }
3541
3542 break;
3543 case 24: {
3544
3545 /** STRUCT: struct type: pci_driver, struct name: mvumi_pci_driver **/
3546
3547
3548 /* content: static int __maybe_unused mvumi_resume(struct pci_dev *pdev)*/
3549 /* LDV_COMMENT_END_PREP */
3550 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "mvumi_pci_driver" */
3551 ldv_handler_precall();
3552 mvumi_resume( var_group6);
3553 /* LDV_COMMENT_BEGIN_PREP */
3554 #ifdef CONFIG_PM
3555 #endif
3556 /* LDV_COMMENT_END_PREP */
3557
3558
3559
3560
3561 }
3562
3563 break;
3564 case 25: {
3565
3566 /** CALLBACK SECTION request_irq **/
3567 LDV_IN_INTERRUPT=2;
3568
3569 /* content: static irqreturn_t mvumi_isr_handler(int irq, void *devp)*/
3570 /* LDV_COMMENT_END_PREP */
3571 /* LDV_COMMENT_FUNCTION_CALL */
3572 ldv_handler_precall();
3573 mvumi_isr_handler( var_mvumi_isr_handler_61_p0, var_mvumi_isr_handler_61_p1);
3574 /* LDV_COMMENT_BEGIN_PREP */
3575 #ifdef CONFIG_PM
3576 #endif
3577 /* LDV_COMMENT_END_PREP */
3578 LDV_IN_INTERRUPT=1;
3579
3580
3581
3582 }
3583
3584 break;
3585 default: break;
3586
3587 }
3588
3589 }
3590
3591 ldv_module_exit:
3592
3593 /** INIT: init_type: ST_MODULE_EXIT **/
3594 /* content: static void __exit mvumi_exit(void)*/
3595 /* LDV_COMMENT_BEGIN_PREP */
3596 #ifdef CONFIG_PM
3597 #endif
3598 /* LDV_COMMENT_END_PREP */
3599 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */
3600 ldv_handler_precall();
3601 mvumi_exit();
3602
3603 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
3604 ldv_final: ldv_check_final_state();
3605
3606 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
3607 return;
3608
3609 }
3610 #endif
3611
 /* LDV_COMMENT_END_MAIN */
1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8 extern void ldv_dma_map_page(void);
9 extern void ldv_dma_mapping_error(void);
10 #line 1 "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.11-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.11-rc1.tar.xz/csd_deg_dscv/2204/dscv_tempdir/dscv/ri/331_1a/drivers/scsi/mvumi.c"
11
12 /*
13 * Marvell UMI driver
14 *
15 * Copyright 2011 Marvell. <jyli@marvell.com>
16 *
17 * This file is licensed under GPLv2.
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License as
21 * published by the Free Software Foundation; version 2 of the
22 * License.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27 * General Public License for more details.
28 *
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
32 * USA
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/moduleparam.h>
38 #include <linux/init.h>
39 #include <linux/device.h>
40 #include <linux/pci.h>
41 #include <linux/list.h>
42 #include <linux/spinlock.h>
43 #include <linux/interrupt.h>
44 #include <linux/delay.h>
45 #include <linux/ktime.h>
46 #include <linux/blkdev.h>
47 #include <linux/io.h>
48 #include <scsi/scsi.h>
49 #include <scsi/scsi_cmnd.h>
50 #include <scsi/scsi_device.h>
51 #include <scsi/scsi_host.h>
52 #include <scsi/scsi_transport.h>
53 #include <scsi/scsi_eh.h>
54 #include <linux/uaccess.h>
55 #include <linux/kthread.h>
56
57 #include "mvumi.h"
58
59 MODULE_LICENSE("GPL");
60 MODULE_AUTHOR("jyli@marvell.com");
61 MODULE_DESCRIPTION("Marvell UMI Driver");
62
63 static const struct pci_device_id mvumi_pci_table[] = {
64 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
65 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
66 { 0 }
67 };
68
69 MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
70
71 static void tag_init(struct mvumi_tag *st, unsigned short size)
72 {
73 unsigned short i;
74 BUG_ON(size != st->size);
75 st->top = size;
76 for (i = 0; i < size; i++)
77 st->stack[i] = size - 1 - i;
78 }
79
80 static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
81 {
82 BUG_ON(st->top <= 0);
83 return st->stack[--st->top];
84 }
85
86 static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
87 unsigned short tag)
88 {
89 BUG_ON(st->top >= st->size);
90 st->stack[st->top++] = tag;
91 }
92
93 static bool tag_is_empty(struct mvumi_tag *st)
94 {
95 if (st->top == 0)
96 return 1;
97 else
98 return 0;
99 }
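/*
 * The tag helpers above implement the tag_pool as a fixed-size LIFO stack:
 * tag_init() refills the stack with tags size-1 .. 0, tag_get_one() pops the
 * next free tag, tag_release_one() pushes a tag back, and tag_is_empty()
 * reports whether any tag is still available. An illustrative call sequence:
 *
 *	tag_init(&mhba->tag_pool, mhba->tag_pool.size);
 *	if (!tag_is_empty(&mhba->tag_pool))
 *		tag = tag_get_one(mhba, &mhba->tag_pool);
 *	...
 *	tag_release_one(mhba, &mhba->tag_pool, tag);
 */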
100
101 static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
102 {
103 int i;
104
105 for (i = 0; i < MAX_BASE_ADDRESS; i++)
106 if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
107 addr_array[i])
108 pci_iounmap(dev, addr_array[i]);
109 }
110
111 static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
112 {
113 int i;
114
115 for (i = 0; i < MAX_BASE_ADDRESS; i++) {
116 if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
117 addr_array[i] = pci_iomap(dev, i, 0);
118 if (!addr_array[i]) {
119 dev_err(&dev->dev, "failed to map Bar[%d]\n",
120 i);
121 mvumi_unmap_pci_addr(dev, addr_array);
122 return -ENOMEM;
123 }
124 } else
125 addr_array[i] = NULL;
126
127 dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
128 }
129
130 return 0;
131 }
132
133 static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
134 enum resource_type type, unsigned int size)
135 {
136 struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
137
138 if (!res) {
139 dev_err(&mhba->pdev->dev,
140 "Failed to allocate memory for resource manager.\n");
141 return NULL;
142 }
143
144 switch (type) {
145 case RESOURCE_CACHED_MEMORY:
146 res->virt_addr = kzalloc(size, GFP_ATOMIC);
147 if (!res->virt_addr) {
148 dev_err(&mhba->pdev->dev,
149 "unable to allocate memory,size = %d.\n", size);
150 kfree(res);
151 return NULL;
152 }
153 break;
154
155 case RESOURCE_UNCACHED_MEMORY:
156 size = round_up(size, 8);
157 res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
158 &res->bus_addr);
159 if (!res->virt_addr) {
160 dev_err(&mhba->pdev->dev,
161 "unable to allocate consistent mem,"
162 "size = %d.\n", size);
163 kfree(res);
164 return NULL;
165 }
166 break;
167
168 default:
169 dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
170 kfree(res);
171 return NULL;
172 }
173
174 res->type = type;
175 res->size = size;
176 INIT_LIST_HEAD(&res->entry);
177 list_add_tail(&res->entry, &mhba->res_list);
178
179 return res;
180 }
181
182 static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
183 {
184 struct mvumi_res *res, *tmp;
185
186 list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
187 switch (res->type) {
188 case RESOURCE_UNCACHED_MEMORY:
189 pci_free_consistent(mhba->pdev, res->size,
190 res->virt_addr, res->bus_addr);
191 break;
192 case RESOURCE_CACHED_MEMORY:
193 kfree(res->virt_addr);
194 break;
195 default:
196 dev_err(&mhba->pdev->dev,
197 "unknown resource type %d\n", res->type);
198 break;
199 }
200 list_del(&res->entry);
201 kfree(res);
202 }
203 mhba->fw_flag &= ~MVUMI_FW_ALLOC;
204 }
205
206 /**
207 * mvumi_make_sgl - Prepares SGL
208 * @mhba: Adapter soft state
209 * @scmd: SCSI command from the mid-layer
210 * @sgl_p: SGL to be filled in
211 * @sg_count: returns the number of SG elements
212 *
213 * If successful, this function returns 0; otherwise, it returns -1.
214 */
215 static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
216 void *sgl_p, unsigned char *sg_count)
217 {
218 struct scatterlist *sg;
219 struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
220 unsigned int i;
221 unsigned int sgnum = scsi_sg_count(scmd);
222 dma_addr_t busaddr;
223
224 if (sgnum) {
225 sg = scsi_sglist(scmd);
226 *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
227 (int) scmd->sc_data_direction);
228 if (*sg_count > mhba->max_sge) {
229 dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
230 "than max sg[0x%x].\n",
231 *sg_count, mhba->max_sge);
232 return -1;
233 }
234 for (i = 0; i < *sg_count; i++) {
235 busaddr = sg_dma_address(&sg[i]);
236 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
237 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
238 m_sg->flags = 0;
239 sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
240 if ((i + 1) == *sg_count)
241 m_sg->flags |= 1U << mhba->eot_flag;
242
243 sgd_inc(mhba, m_sg);
244 }
245 } else {
246 scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
247 pci_map_single(mhba->pdev, scsi_sglist(scmd),
248 scsi_bufflen(scmd),
249 (int) scmd->sc_data_direction)
250 : 0;
251 busaddr = scmd->SCp.dma_handle;
252 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
253 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
254 m_sg->flags = 1U << mhba->eot_flag;
255 sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd)));
256 *sg_count = 1;
257 }
258
259 return 0;
260 }
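/*
 * Each SG descriptor built above carries the DMA address split into 32-bit
 * halves (baseaddr_l/baseaddr_h), the mapped length set via sgd_setsz(), and
 * a flags field; the last descriptor of the table is marked with the
 * end-of-table bit (1U << mhba->eot_flag). Commands without a scatterlist
 * fall back to a single descriptor for the (possibly zero-length) buffer.
 */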
261
262 static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
263 unsigned int size)
264 {
265 struct mvumi_sgl *m_sg;
266 void *virt_addr;
267 dma_addr_t phy_addr;
268
269 if (size == 0)
270 return 0;
271
272 virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
273 if (!virt_addr)
274 return -1;
275
276 m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
277 cmd->frame->sg_counts = 1;
278 cmd->data_buf = virt_addr;
279
280 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
281 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
282 m_sg->flags = 1U << mhba->eot_flag;
283 sgd_setsz(mhba, m_sg, cpu_to_le32(size));
284
285 return 0;
286 }
287
288 static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
289 unsigned int buf_size)
290 {
291 struct mvumi_cmd *cmd;
292
293 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
294 if (!cmd) {
295 dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n");
296 return NULL;
297 }
298 INIT_LIST_HEAD(&cmd->queue_pointer);
299
300 cmd->frame = pci_alloc_consistent(mhba->pdev,
301 mhba->ib_max_size, &cmd->frame_phys);
302 if (!cmd->frame) {
303 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
304 " frame,size = %d.\n", mhba->ib_max_size);
305 kfree(cmd);
306 return NULL;
307 }
308
309 if (buf_size) {
310 if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
311 dev_err(&mhba->pdev->dev, "failed to allocate memory"
312 " for internal frame\n");
313 pci_free_consistent(mhba->pdev, mhba->ib_max_size,
314 cmd->frame, cmd->frame_phys);
315 kfree(cmd);
316 return NULL;
317 }
318 } else
319 cmd->frame->sg_counts = 0;
320
321 return cmd;
322 }
323
324 static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
325 struct mvumi_cmd *cmd)
326 {
327 struct mvumi_sgl *m_sg;
328 unsigned int size;
329 dma_addr_t phy_addr;
330
331 if (cmd && cmd->frame) {
332 if (cmd->frame->sg_counts) {
333 m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
334 sgd_getsz(mhba, m_sg, size);
335
336 phy_addr = (dma_addr_t) m_sg->baseaddr_l |
337 (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
338
339 pci_free_consistent(mhba->pdev, size, cmd->data_buf,
340 phy_addr);
341 }
342 pci_free_consistent(mhba->pdev, mhba->ib_max_size,
343 cmd->frame, cmd->frame_phys);
344 kfree(cmd);
345 }
346 }
347
348 /**
349 * mvumi_get_cmd - Get a command from the free pool
350 * @mhba: Adapter soft state
351 *
352 * Returns a free command from the pool
353 */
354 static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
355 {
356 struct mvumi_cmd *cmd = NULL;
357
358 if (likely(!list_empty(&mhba->cmd_pool))) {
359 cmd = list_entry((&mhba->cmd_pool)->next,
360 struct mvumi_cmd, queue_pointer);
361 list_del_init(&cmd->queue_pointer);
362 } else
363 dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
364
365 return cmd;
366 }
367
368 /**
369 * mvumi_return_cmd - Return a cmd to free command pool
370 * @mhba: Adapter soft state
371 * @cmd: Command packet to be returned to free command pool
372 */
373 static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
374 struct mvumi_cmd *cmd)
375 {
376 cmd->scmd = NULL;
377 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
378 }
379
380 /**
381 * mvumi_free_cmds - Free all the cmds in the free cmd pool
382 * @mhba: Adapter soft state
383 */
384 static void mvumi_free_cmds(struct mvumi_hba *mhba)
385 {
386 struct mvumi_cmd *cmd;
387
388 while (!list_empty(&mhba->cmd_pool)) {
389 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
390 queue_pointer);
391 list_del(&cmd->queue_pointer);
392 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
393 kfree(cmd->frame);
394 kfree(cmd);
395 }
396 }
397
398 /**
399 * mvumi_alloc_cmds - Allocates the command packets
400 * @mhba: Adapter soft state
401 *
402 */
403 static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
404 {
405 int i;
406 struct mvumi_cmd *cmd;
407
408 for (i = 0; i < mhba->max_io; i++) {
409 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
410 if (!cmd)
411 goto err_exit;
412
413 INIT_LIST_HEAD(&cmd->queue_pointer);
414 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
415 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
416 cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
417 cmd->frame_phys = mhba->ib_frame_phys
418 + i * mhba->ib_max_size;
419 } else
420 cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
421 if (!cmd->frame)
422 goto err_exit;
423 }
424 return 0;
425
426 err_exit:
427 dev_err(&mhba->pdev->dev,
428 "failed to allocate memory for cmd[0x%x].\n", i);
429 while (!list_empty(&mhba->cmd_pool)) {
430 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
431 queue_pointer);
432 list_del(&cmd->queue_pointer);
433 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
434 kfree(cmd->frame);
435 kfree(cmd);
436 }
437 return -ENOMEM;
438 }
439
440 static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
441 {
442 unsigned int ib_rp_reg;
443 struct mvumi_hw_regs *regs = mhba->regs;
444
445 ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
446
447 if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
448 (mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
449 ((ib_rp_reg & regs->cl_pointer_toggle)
450 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
451 dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
452 return 0;
453 }
454 if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
455 dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
456 return 0;
457 } else {
458 return mhba->max_io - atomic_read(&mhba->fw_outstanding);
459 }
460 }
461
462 static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
463 {
464 unsigned int count;
465 if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
466 return 0;
467 count = ioread32(mhba->ib_shadow);
468 if (count == 0xffff)
469 return 0;
470 return count;
471 }
472
473 static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
474 {
475 unsigned int cur_ib_entry;
476
477 cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
478 cur_ib_entry++;
479 if (cur_ib_entry >= mhba->list_num_io) {
480 cur_ib_entry -= mhba->list_num_io;
481 mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
482 }
483 mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
484 mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
485 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
486 *ib_entry = mhba->ib_list + cur_ib_entry *
487 sizeof(struct mvumi_dyn_list_entry);
488 } else {
489 *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
490 }
491 atomic_inc(&mhba->fw_outstanding);
492 }
493
494 static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
495 {
496 iowrite32(0xffff, mhba->ib_shadow);
497 iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
498 }
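/*
 * ib_cur_slot encodes the current inbound-list position: the low bits
 * (regs->cl_slot_num_mask) hold the slot index and the cl_pointer_toggle bit
 * is flipped each time the index wraps past list_num_io. Handing an entry to
 * the firmware consists of writing 0xffff to the ib_shadow register and the
 * updated ib_cur_slot value to inb_write_pointer, as done above.
 */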
499
500 static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
501 unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
502 {
503 unsigned short tag, request_id;
504
505 udelay(1);
506 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
507 request_id = p_outb_frame->request_id;
508 tag = p_outb_frame->tag;
509 if (tag > mhba->tag_pool.size) {
510 dev_err(&mhba->pdev->dev, "ob frame data error\n");
511 return -1;
512 }
513 if (mhba->tag_cmd[tag] == NULL) {
514 dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
515 return -1;
516 } else if (mhba->tag_cmd[tag]->request_id != request_id &&
517 mhba->request_id_enabled) {
518 dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
519 "cmd request ID:0x%x\n", request_id,
520 mhba->tag_cmd[tag]->request_id);
521 return -1;
522 }
523
524 return 0;
525 }
526
527 static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
528 unsigned int *cur_obf, unsigned int *assign_obf_end)
529 {
530 unsigned int ob_write, ob_write_shadow;
531 struct mvumi_hw_regs *regs = mhba->regs;
532
533 do {
534 ob_write = ioread32(regs->outb_copy_pointer);
535 ob_write_shadow = ioread32(mhba->ob_shadow);
536 } while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
537
538 *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
539 *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
540
541 if ((ob_write & regs->cl_pointer_toggle) !=
542 (mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
543 *assign_obf_end += mhba->list_num_io;
544 }
545 return 0;
546 }
547
548 static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
549 unsigned int *cur_obf, unsigned int *assign_obf_end)
550 {
551 unsigned int ob_write;
552 struct mvumi_hw_regs *regs = mhba->regs;
553
554 ob_write = ioread32(regs->outb_read_pointer);
555 ob_write = ioread32(regs->outb_copy_pointer);
556 *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
557 *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
558 if (*assign_obf_end < *cur_obf)
559 *assign_obf_end += mhba->list_num_io;
560 else if (*assign_obf_end == *cur_obf)
561 return -1;
562 return 0;
563 }
564
565 static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
566 {
567 unsigned int cur_obf, assign_obf_end, i;
568 struct mvumi_ob_data *ob_data;
569 struct mvumi_rsp_frame *p_outb_frame;
570 struct mvumi_hw_regs *regs = mhba->regs;
571
572 if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
573 return;
574
575 for (i = (assign_obf_end - cur_obf); i != 0; i--) {
576 cur_obf++;
577 if (cur_obf >= mhba->list_num_io) {
578 cur_obf -= mhba->list_num_io;
579 mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
580 }
581
582 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
583
584 /* Copy pointer may point to entry in outbound list
585 * before entry has valid data
586 */
587 if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
588 mhba->tag_cmd[p_outb_frame->tag] == NULL ||
589 p_outb_frame->request_id !=
590 mhba->tag_cmd[p_outb_frame->tag]->request_id))
591 if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
592 continue;
593
594 if (!list_empty(&mhba->ob_data_list)) {
595 ob_data = (struct mvumi_ob_data *)
596 list_first_entry(&mhba->ob_data_list,
597 struct mvumi_ob_data, list);
598 list_del_init(&ob_data->list);
599 } else {
600 ob_data = NULL;
601 if (cur_obf == 0) {
602 cur_obf = mhba->list_num_io - 1;
603 mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
604 } else
605 cur_obf -= 1;
606 break;
607 }
608
609 memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
610 p_outb_frame->tag = 0xff;
611
612 list_add_tail(&ob_data->list, &mhba->free_ob_list);
613 }
614 mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
615 mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
616 iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
617 }
618
619 static void mvumi_reset(struct mvumi_hba *mhba)
620 {
621 struct mvumi_hw_regs *regs = mhba->regs;
622
623 iowrite32(0, regs->enpointa_mask_reg);
624 if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
625 return;
626
627 iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
628 }
629
630 static unsigned char mvumi_start(struct mvumi_hba *mhba);
631
632 static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
633 {
634 mhba->fw_state = FW_STATE_ABORT;
635 mvumi_reset(mhba);
636
637 if (mvumi_start(mhba))
638 return FAILED;
639 else
640 return SUCCESS;
641 }
642
643 static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
644 {
645 struct mvumi_hw_regs *regs = mhba->regs;
646 u32 tmp;
647 unsigned long before;
648 before = jiffies;
649
650 iowrite32(0, regs->enpointa_mask_reg);
651 tmp = ioread32(regs->arm_to_pciea_msg1);
652 while (tmp != HANDSHAKE_READYSTATE) {
653 iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
654 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
655 dev_err(&mhba->pdev->dev,
656 "FW reset failed [0x%x].\n", tmp);
657 return FAILED;
658 }
659
660 msleep(500);
661 rmb();
662 tmp = ioread32(regs->arm_to_pciea_msg1);
663 }
664
665 return SUCCESS;
666 }
667
668 static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
669 {
670 unsigned char i;
671
672 for (i = 0; i < MAX_BASE_ADDRESS; i++) {
673 pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
674 &mhba->pci_base[i]);
675 }
676 }
677
678 static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
679 {
680 unsigned char i;
681
682 for (i = 0; i < MAX_BASE_ADDRESS; i++) {
683 if (mhba->pci_base[i])
684 pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
685 mhba->pci_base[i]);
686 }
687 }
688
689 static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
690 {
691 unsigned int ret = 0;
692 pci_set_master(pdev);
693
694 if (IS_DMA64) {
695 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
696 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
697 } else
698 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
699
700 return ret;
701 }
702
703 static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
704 {
705 mhba->fw_state = FW_STATE_ABORT;
706
707 iowrite32(0, mhba->regs->reset_enable);
708 iowrite32(0xf, mhba->regs->reset_request);
709
710 iowrite32(0x10, mhba->regs->reset_enable);
711 iowrite32(0x10, mhba->regs->reset_request);
712 msleep(100);
713 pci_disable_device(mhba->pdev);
714
715 if (pci_enable_device(mhba->pdev)) {
716 dev_err(&mhba->pdev->dev, "enable device failed\n");
717 return FAILED;
718 }
719 if (mvumi_pci_set_master(mhba->pdev)) {
720 dev_err(&mhba->pdev->dev, "set master failed\n");
721 return FAILED;
722 }
723 mvumi_restore_bar_addr(mhba);
724 if (mvumi_wait_for_fw(mhba) == FAILED)
725 return FAILED;
726
727 return mvumi_wait_for_outstanding(mhba);
728 }
729
730 static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
731 {
732 return mvumi_wait_for_outstanding(mhba);
733 }
734
735 static int mvumi_host_reset(struct scsi_cmnd *scmd)
736 {
737 struct mvumi_hba *mhba;
738
739 mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
740
741 scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
742 scmd->serial_number, scmd->cmnd[0], scmd->retries);
743
744 return mhba->instancet->reset_host(mhba);
745 }
746
747 static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
748 struct mvumi_cmd *cmd)
749 {
750 unsigned long flags;
751
752 cmd->cmd_status = REQ_STATUS_PENDING;
753
754 if (atomic_read(&cmd->sync_cmd)) {
755 dev_err(&mhba->pdev->dev,
756 "last blocked cmd not finished, sync_cmd = %d\n",
757 atomic_read(&cmd->sync_cmd));
758 BUG_ON(1);
759 return -1;
760 }
761 atomic_inc(&cmd->sync_cmd);
762 spin_lock_irqsave(mhba->shost->host_lock, flags);
763 mhba->instancet->fire_cmd(mhba, cmd);
764 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
765
766 wait_event_timeout(mhba->int_cmd_wait_q,
767 (cmd->cmd_status != REQ_STATUS_PENDING),
768 MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
769
770 /* command timeout */
771 if (atomic_read(&cmd->sync_cmd)) {
772 spin_lock_irqsave(mhba->shost->host_lock, flags);
773 atomic_dec(&cmd->sync_cmd);
774 if (mhba->tag_cmd[cmd->frame->tag]) {
775 mhba->tag_cmd[cmd->frame->tag] = 0;
776 dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
777 cmd->frame->tag);
778 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
779 }
780 if (!list_empty(&cmd->queue_pointer)) {
781 dev_warn(&mhba->pdev->dev,
782 "TIMEOUT:A internal command doesn't send!\n");
783 list_del_init(&cmd->queue_pointer);
784 } else
785 atomic_dec(&mhba->fw_outstanding);
786
787 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
788 }
789 return 0;
790 }
791
792 static void mvumi_release_fw(struct mvumi_hba *mhba)
793 {
794 mvumi_free_cmds(mhba);
795 mvumi_release_mem_resource(mhba);
796 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
797 pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
798 mhba->handshake_page, mhba->handshake_page_phys);
799 kfree(mhba->regs);
800 pci_release_regions(mhba->pdev);
801 }
802
803 static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
804 {
805 struct mvumi_cmd *cmd;
806 struct mvumi_msg_frame *frame;
807 unsigned char device_id, retry = 0;
808 unsigned char bitcount = sizeof(unsigned char) * 8;
809
810 for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
811 if (!(mhba->target_map[device_id / bitcount] &
812 (1 << (device_id % bitcount))))
813 continue;
814 get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0);
815 if (!cmd) {
816 if (retry++ >= 5) {
817 dev_err(&mhba->pdev->dev, "failed to get memory"
818 " for internal flush cache cmd for "
819 "device %d", device_id);
820 retry = 0;
821 continue;
822 } else
823 goto get_cmd;
824 }
825 cmd->scmd = NULL;
826 cmd->cmd_status = REQ_STATUS_PENDING;
827 atomic_set(&cmd->sync_cmd, 0);
828 frame = cmd->frame;
829 frame->req_function = CL_FUN_SCSI_CMD;
830 frame->device_id = device_id;
831 frame->cmd_flag = CMD_FLAG_NON_DATA;
832 frame->data_transfer_length = 0;
833 frame->cdb_length = MAX_COMMAND_SIZE;
834 memset(frame->cdb, 0, MAX_COMMAND_SIZE);
835 frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
836 frame->cdb[1] = CDB_CORE_MODULE;
837 frame->cdb[2] = CDB_CORE_SHUTDOWN;
838
839 mvumi_issue_blocked_cmd(mhba, cmd);
840 if (cmd->cmd_status != SAM_STAT_GOOD) {
841 dev_err(&mhba->pdev->dev,
842 "device %d flush cache failed, status=0x%x.\n",
843 device_id, cmd->cmd_status);
844 }
845
846 mvumi_delete_internal_cmd(mhba, cmd);
847 }
848 return 0;
849 }
850
851 static unsigned char
852 mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
853 unsigned short len)
854 {
855 unsigned char *ptr;
856 unsigned char ret = 0, i;
857
858 ptr = (unsigned char *) p_header->frame_content;
859 for (i = 0; i < len; i++) {
860 ret ^= *ptr;
861 ptr++;
862 }
863
864 return ret;
865 }
866
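/*
 * A minimal user-space sketch of the checksum used for the handshake pages
 * below: a one-byte running XOR over the page's frame_content. The buffer
 * contents and length here are made up purely for illustration.
 */
#include <stdio.h>

static unsigned char xor_checksum(const unsigned char *buf, unsigned short len)
{
	unsigned char ret = 0;

	while (len--)
		ret ^= *buf++;
	return ret;
}

int main(void)
{
	unsigned char content[4] = { 0x12, 0x34, 0x56, 0x78 };

	/* 0x12 ^ 0x34 ^ 0x56 ^ 0x78 == 0x08 */
	printf("checksum = 0x%02x\n", xor_checksum(content, sizeof(content)));
	return 0;
}
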
867 static void mvumi_hs_build_page(struct mvumi_hba *mhba,
868 struct mvumi_hs_header *hs_header)
869 {
870 struct mvumi_hs_page2 *hs_page2;
871 struct mvumi_hs_page4 *hs_page4;
872 struct mvumi_hs_page3 *hs_page3;
873 u64 time;
874 u64 local_time;
875
876 switch (hs_header->page_code) {
877 case HS_PAGE_HOST_INFO:
878 hs_page2 = (struct mvumi_hs_page2 *) hs_header;
879 hs_header->frame_length = sizeof(*hs_page2) - 4;
880 memset(hs_header->frame_content, 0, hs_header->frame_length);
881 hs_page2->host_type = 3; /* 3 means Linux */
882 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
883 hs_page2->host_cap = 0x08;/* host dynamic source mode */
884 hs_page2->host_ver.ver_major = VER_MAJOR;
885 hs_page2->host_ver.ver_minor = VER_MINOR;
886 hs_page2->host_ver.ver_oem = VER_OEM;
887 hs_page2->host_ver.ver_build = VER_BUILD;
888 hs_page2->system_io_bus = 0;
889 hs_page2->slot_number = 0;
890 hs_page2->intr_level = 0;
891 hs_page2->intr_vector = 0;
892 time = ktime_get_real_seconds();
893 local_time = (time - (sys_tz.tz_minuteswest * 60));
894 hs_page2->seconds_since1970 = local_time;
895 hs_header->checksum = mvumi_calculate_checksum(hs_header,
896 hs_header->frame_length);
897 break;
898
899 case HS_PAGE_FIRM_CTL:
900 hs_page3 = (struct mvumi_hs_page3 *) hs_header;
901 hs_header->frame_length = sizeof(*hs_page3) - 4;
902 memset(hs_header->frame_content, 0, hs_header->frame_length);
903 hs_header->checksum = mvumi_calculate_checksum(hs_header,
904 hs_header->frame_length);
905 break;
906
907 case HS_PAGE_CL_INFO:
908 hs_page4 = (struct mvumi_hs_page4 *) hs_header;
909 hs_header->frame_length = sizeof(*hs_page4) - 4;
910 memset(hs_header->frame_content, 0, hs_header->frame_length);
911 hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
912 hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
913
914 hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
915 hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
916 hs_page4->ib_entry_size = mhba->ib_max_size_setting;
917 hs_page4->ob_entry_size = mhba->ob_max_size_setting;
918 if (mhba->hba_capability
919 & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
920 hs_page4->ob_depth = find_first_bit((unsigned long *)
921 &mhba->list_num_io,
922 BITS_PER_LONG);
923 hs_page4->ib_depth = find_first_bit((unsigned long *)
924 &mhba->list_num_io,
925 BITS_PER_LONG);
926 } else {
927 hs_page4->ob_depth = (u8) mhba->list_num_io;
928 hs_page4->ib_depth = (u8) mhba->list_num_io;
929 }
930 hs_header->checksum = mvumi_calculate_checksum(hs_header,
931 hs_header->frame_length);
932 break;
933
934 default:
935 dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
936 hs_header->page_code);
937 break;
938 }
939 }
940
941 /**
942 * mvumi_init_data - Initialize requested data for FW
943 * @mhba: Adapter soft state
944 */
945 static int mvumi_init_data(struct mvumi_hba *mhba)
946 {
947 struct mvumi_ob_data *ob_pool;
948 struct mvumi_res *res_mgnt;
949 unsigned int tmp_size, offset, i;
950 void *virmem, *v;
951 dma_addr_t p;
952
953 if (mhba->fw_flag & MVUMI_FW_ALLOC)
954 return 0;
955
956 tmp_size = mhba->ib_max_size * mhba->max_io;
957 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
958 tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
959
960 tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
961 tmp_size += 8 + sizeof(u32)*2 + 16;
962
963 res_mgnt = mvumi_alloc_mem_resource(mhba,
964 RESOURCE_UNCACHED_MEMORY, tmp_size);
965 if (!res_mgnt) {
966 dev_err(&mhba->pdev->dev,
967 "failed to allocate memory for inbound list\n");
968 goto fail_alloc_dma_buf;
969 }
970
971 p = res_mgnt->bus_addr;
972 v = res_mgnt->virt_addr;
973 /* ib_list */
974 offset = round_up(p, 128) - p;
975 p += offset;
976 v += offset;
977 mhba->ib_list = v;
978 mhba->ib_list_phys = p;
979 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
980 v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
981 p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
982 mhba->ib_frame = v;
983 mhba->ib_frame_phys = p;
984 }
985 v += mhba->ib_max_size * mhba->max_io;
986 p += mhba->ib_max_size * mhba->max_io;
987
988 /* ib shadow */
989 offset = round_up(p, 8) - p;
990 p += offset;
991 v += offset;
992 mhba->ib_shadow = v;
993 mhba->ib_shadow_phys = p;
994 p += sizeof(u32)*2;
995 v += sizeof(u32)*2;
996 /* ob shadow */
997 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
998 offset = round_up(p, 8) - p;
999 p += offset;
1000 v += offset;
1001 mhba->ob_shadow = v;
1002 mhba->ob_shadow_phys = p;
1003 p += 8;
1004 v += 8;
1005 } else {
1006 offset = round_up(p, 4) - p;
1007 p += offset;
1008 v += offset;
1009 mhba->ob_shadow = v;
1010 mhba->ob_shadow_phys = p;
1011 p += 4;
1012 v += 4;
1013 }
1014
1015 /* ob list */
1016 offset = round_up(p, 128) - p;
1017 p += offset;
1018 v += offset;
1019
1020 mhba->ob_list = v;
1021 mhba->ob_list_phys = p;
1022
1023 /* ob data pool */
1024 tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
1025 tmp_size = round_up(tmp_size, 8);
1026
1027 res_mgnt = mvumi_alloc_mem_resource(mhba,
1028 RESOURCE_CACHED_MEMORY, tmp_size);
1029 if (!res_mgnt) {
1030 dev_err(&mhba->pdev->dev,
1031 "failed to allocate memory for outbound data buffer\n");
1032 goto fail_alloc_dma_buf;
1033 }
1034 virmem = res_mgnt->virt_addr;
1035
1036 for (i = mhba->max_io; i != 0; i--) {
1037 ob_pool = (struct mvumi_ob_data *) virmem;
1038 list_add_tail(&ob_pool->list, &mhba->ob_data_list);
1039 virmem += mhba->ob_max_size + sizeof(*ob_pool);
1040 }
1041
1042 tmp_size = sizeof(unsigned short) * mhba->max_io +
1043 sizeof(struct mvumi_cmd *) * mhba->max_io;
1044 tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
1045 (sizeof(unsigned char) * 8);
1046
1047 res_mgnt = mvumi_alloc_mem_resource(mhba,
1048 RESOURCE_CACHED_MEMORY, tmp_size);
1049 if (!res_mgnt) {
1050 dev_err(&mhba->pdev->dev,
1051 "failed to allocate memory for tag and target map\n");
1052 goto fail_alloc_dma_buf;
1053 }
1054
1055 virmem = res_mgnt->virt_addr;
1056 mhba->tag_pool.stack = virmem;
1057 mhba->tag_pool.size = mhba->max_io;
1058 tag_init(&mhba->tag_pool, mhba->max_io);
1059 virmem += sizeof(unsigned short) * mhba->max_io;
1060
1061 mhba->tag_cmd = virmem;
1062 virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
1063
1064 mhba->target_map = virmem;
1065
1066 mhba->fw_flag |= MVUMI_FW_ALLOC;
1067 return 0;
1068
1069 fail_alloc_dma_buf:
1070 mvumi_release_mem_resource(mhba);
1071 return -1;
1072 }
1073
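/*
 * mvumi_init_data() above carves one uncached DMA allocation into the
 * inbound list, shadow words and outbound list by advancing a bus address
 * p and a virtual pointer v, padding each region to its alignment with
 * offset = round_up(p, align) - p. A user-space sketch of that idiom; the
 * start address below is an arbitrary example.
 */
#include <stdio.h>

/* power-of-two round-up, matching how the driver uses round_up() here */
#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long p = 0x10000024;
	unsigned long offset = ROUND_UP(p, 128) - p;

	/* 0x10000024 rounded up to 128 bytes is 0x10000080: 92 bytes of pad */
	printf("pad %lu bytes, region starts at 0x%lx\n", offset, p + offset);
	return 0;
}
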
1074 static int mvumi_hs_process_page(struct mvumi_hba *mhba,
1075 struct mvumi_hs_header *hs_header)
1076 {
1077 struct mvumi_hs_page1 *hs_page1;
1078 unsigned char page_checksum;
1079
1080 page_checksum = mvumi_calculate_checksum(hs_header,
1081 hs_header->frame_length);
1082 if (page_checksum != hs_header->checksum) {
1083 dev_err(&mhba->pdev->dev, "checksum error\n");
1084 return -1;
1085 }
1086
1087 switch (hs_header->page_code) {
1088 case HS_PAGE_FIRM_CAP:
1089 hs_page1 = (struct mvumi_hs_page1 *) hs_header;
1090
1091 mhba->max_io = hs_page1->max_io_support;
1092 mhba->list_num_io = hs_page1->cl_inout_list_depth;
1093 mhba->max_transfer_size = hs_page1->max_transfer_size;
1094 mhba->max_target_id = hs_page1->max_devices_support;
1095 mhba->hba_capability = hs_page1->capability;
1096 mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
1097 mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
1098
1099 mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
1100 mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
1101
1102 dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
1103 hs_page1->fw_ver.ver_build);
1104
1105 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
1106 mhba->eot_flag = 22;
1107 else
1108 mhba->eot_flag = 27;
1109 if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
1110 mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
1111 break;
1112 default:
1113 dev_err(&mhba->pdev->dev, "handshake: page code error\n");
1114 return -1;
1115 }
1116 return 0;
1117 }
1118
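/*
 * In mvumi_hs_process_page() above the firmware reports the list entry
 * sizes as exponents; the driver converts a setting to bytes with
 * (1 << setting) << 2. A small sketch of that conversion (the settings
 * iterated over are arbitrary):
 */
#include <stdio.h>

int main(void)
{
	unsigned int setting;

	/* settings 0..5 map to 4, 8, 16, 32, 64 and 128 bytes per entry */
	for (setting = 0; setting <= 5; setting++)
		printf("setting %u -> %u bytes\n", setting,
		       (1U << setting) << 2);
	return 0;
}
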
1119 /**
1120 * mvumi_handshake - Move the FW to READY state
1121 * @mhba: Adapter soft state
1122 *
1123 * During initialization, the FW can potentially be in any one of several
1124 * possible states. If the FW is in the operational or waiting-for-handshake
1125 * state, the driver must take steps to bring it to the ready state.
1126 * Otherwise, it has to wait for the ready state.
1127 */
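/*
 * Host-side state flow driven by the switch below:
 *
 *   HS_S_START      - request HS_S_RESET and publish HANDSHAKE_SIGNATURE
 *   HS_S_RESET      - publish the handshake page address, request
 *                     HS_S_PAGE_ADDR
 *   HS_S_PAGE_ADDR,
 *   HS_S_QUERY_PAGE,
 *   HS_S_SEND_PAGE  - exchange handshake pages until hba_total_pages are
 *                     processed, then request HS_S_END
 *   HS_S_END        - program the list base addresses and shadows, mark the
 *                     firmware FW_STATE_STARTED
 */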
1128 static int mvumi_handshake(struct mvumi_hba *mhba)
1129 {
1130 unsigned int hs_state, tmp, hs_fun;
1131 struct mvumi_hs_header *hs_header;
1132 struct mvumi_hw_regs *regs = mhba->regs;
1133
1134 if (mhba->fw_state == FW_STATE_STARTING)
1135 hs_state = HS_S_START;
1136 else {
1137 tmp = ioread32(regs->arm_to_pciea_msg0);
1138 hs_state = HS_GET_STATE(tmp);
1139 dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
1140 if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
1141 mhba->fw_state = FW_STATE_STARTING;
1142 return -1;
1143 }
1144 }
1145
1146 hs_fun = 0;
1147 switch (hs_state) {
1148 case HS_S_START:
1149 mhba->fw_state = FW_STATE_HANDSHAKING;
1150 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1151 HS_SET_STATE(hs_fun, HS_S_RESET);
1152 iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
1153 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1154 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1155 break;
1156
1157 case HS_S_RESET:
1158 iowrite32(lower_32_bits(mhba->handshake_page_phys),
1159 regs->pciea_to_arm_msg1);
1160 iowrite32(upper_32_bits(mhba->handshake_page_phys),
1161 regs->arm_to_pciea_msg1);
1162 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1163 HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
1164 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1165 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1166 break;
1167
1168 case HS_S_PAGE_ADDR:
1169 case HS_S_QUERY_PAGE:
1170 case HS_S_SEND_PAGE:
1171 hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
1172 if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
1173 mhba->hba_total_pages =
1174 ((struct mvumi_hs_page1 *) hs_header)->total_pages;
1175
1176 if (mhba->hba_total_pages == 0)
1177 mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1178 }
1179
1180 if (hs_state == HS_S_QUERY_PAGE) {
1181 if (mvumi_hs_process_page(mhba, hs_header)) {
1182 HS_SET_STATE(hs_fun, HS_S_ABORT);
1183 return -1;
1184 }
1185 if (mvumi_init_data(mhba)) {
1186 HS_SET_STATE(hs_fun, HS_S_ABORT);
1187 return -1;
1188 }
1189 } else if (hs_state == HS_S_PAGE_ADDR) {
1190 hs_header->page_code = 0;
1191 mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1192 }
1193
1194 if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
1195 hs_header->page_code++;
1196 if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
1197 mvumi_hs_build_page(mhba, hs_header);
1198 HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
1199 } else
1200 HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
1201 } else
1202 HS_SET_STATE(hs_fun, HS_S_END);
1203
1204 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1205 iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1206 iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1207 break;
1208
1209 case HS_S_END:
1210 /* Set communication list ISR */
1211 tmp = ioread32(regs->enpointa_mask_reg);
1212 tmp |= regs->int_comaout | regs->int_comaerr;
1213 iowrite32(tmp, regs->enpointa_mask_reg);
1214 iowrite32(mhba->list_num_io, mhba->ib_shadow);
1215 /* Set InBound List Available count shadow */
1216 iowrite32(lower_32_bits(mhba->ib_shadow_phys),
1217 regs->inb_aval_count_basel);
1218 iowrite32(upper_32_bits(mhba->ib_shadow_phys),
1219 regs->inb_aval_count_baseh);
1220
1221 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
1222 /* Set OutBound List Available count shadow */
1223 iowrite32((mhba->list_num_io-1) |
1224 regs->cl_pointer_toggle,
1225 mhba->ob_shadow);
1226 iowrite32(lower_32_bits(mhba->ob_shadow_phys),
1227 regs->outb_copy_basel);
1228 iowrite32(upper_32_bits(mhba->ob_shadow_phys),
1229 regs->outb_copy_baseh);
1230 }
1231
1232 mhba->ib_cur_slot = (mhba->list_num_io - 1) |
1233 regs->cl_pointer_toggle;
1234 mhba->ob_cur_slot = (mhba->list_num_io - 1) |
1235 regs->cl_pointer_toggle;
1236 mhba->fw_state = FW_STATE_STARTED;
1237
1238 break;
1239 default:
1240 dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
1241 hs_state);
1242 return -1;
1243 }
1244 return 0;
1245 }
1246
1247 static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1248 {
1249 unsigned int isr_status;
1250 unsigned long before;
1251
1252 before = jiffies;
1253 mvumi_handshake(mhba);
1254 do {
1255 isr_status = mhba->instancet->read_fw_status_reg(mhba);
1256
1257 if (mhba->fw_state == FW_STATE_STARTED)
1258 return 0;
1259 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1260 dev_err(&mhba->pdev->dev,
1261 "no handshake response at state 0x%x.\n",
1262 mhba->fw_state);
1263 dev_err(&mhba->pdev->dev,
1264 "isr : global=0x%x,status=0x%x.\n",
1265 mhba->global_isr, isr_status);
1266 return -1;
1267 }
1268 rmb();
1269 usleep_range(1000, 2000);
1270 } while (!(isr_status & DRBL_HANDSHAKE_ISR));
1271
1272 return 0;
1273 }
1274
1275 static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1276 {
1277 unsigned int tmp;
1278 unsigned long before;
1279
1280 before = jiffies;
1281 tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1282 while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
1283 if (tmp != HANDSHAKE_READYSTATE)
1284 iowrite32(DRBL_MU_RESET,
1285 mhba->regs->pciea_to_arm_drbl_reg);
1286 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1287 dev_err(&mhba->pdev->dev,
1288 "invalid signature [0x%x].\n", tmp);
1289 return -1;
1290 }
1291 usleep_range(1000, 2000);
1292 rmb();
1293 tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1294 }
1295
1296 mhba->fw_state = FW_STATE_STARTING;
1297 dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
1298 do {
1299 if (mvumi_handshake_event(mhba)) {
1300 dev_err(&mhba->pdev->dev,
1301 "handshake failed at state 0x%x.\n",
1302 mhba->fw_state);
1303 return -1;
1304 }
1305 } while (mhba->fw_state != FW_STATE_STARTED);
1306
1307 dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
1308
1309 return 0;
1310 }
1311
1312 static unsigned char mvumi_start(struct mvumi_hba *mhba)
1313 {
1314 unsigned int tmp;
1315 struct mvumi_hw_regs *regs = mhba->regs;
1316
1317 /* clear Door bell */
1318 tmp = ioread32(regs->arm_to_pciea_drbl_reg);
1319 iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
1320
1321 iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1322 tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
1323 iowrite32(tmp, regs->enpointa_mask_reg);
1324 msleep(100);
1325 if (mvumi_check_handshake(mhba))
1326 return -1;
1327
1328 return 0;
1329 }
1330
1331 /**
1332 * mvumi_complete_cmd - Completes a command
1333 * @mhba: Adapter soft state
1334 * @cmd: Command to be completed
1335 */
1336 static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1337 struct mvumi_rsp_frame *ob_frame)
1338 {
1339 struct scsi_cmnd *scmd = cmd->scmd;
1340
1341 cmd->scmd->SCp.ptr = NULL;
1342 scmd->result = ob_frame->req_status;
1343
1344 switch (ob_frame->req_status) {
1345 case SAM_STAT_GOOD:
1346 scmd->result |= DID_OK << 16;
1347 break;
1348 case SAM_STAT_BUSY:
1349 scmd->result |= DID_BUS_BUSY << 16;
1350 break;
1351 case SAM_STAT_CHECK_CONDITION:
1352 scmd->result |= (DID_OK << 16);
1353 if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
1354 memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
1355 sizeof(struct mvumi_sense_data));
1356 scmd->result |= (DRIVER_SENSE << 24);
1357 }
1358 break;
1359 default:
1360 scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
1361 break;
1362 }
1363
1364 if (scsi_bufflen(scmd)) {
1365 if (scsi_sg_count(scmd)) {
1366 pci_unmap_sg(mhba->pdev,
1367 scsi_sglist(scmd),
1368 scsi_sg_count(scmd),
1369 (int) scmd->sc_data_direction);
1370 } else {
1371 pci_unmap_single(mhba->pdev,
1372 scmd->SCp.dma_handle,
1373 scsi_bufflen(scmd),
1374 (int) scmd->sc_data_direction);
1375
1376 scmd->SCp.dma_handle = 0;
1377 }
1378 }
1379 cmd->scmd->scsi_done(scmd);
1380 mvumi_return_cmd(mhba, cmd);
1381 }
1382
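/*
 * scmd->result is assembled above using the mid-layer's byte layout: the
 * SCSI status in the low byte, the host byte (DID_OK, DID_BUS_BUSY, ...)
 * shifted left by 16, and the driver byte (DRIVER_SENSE when sense data
 * was copied back) shifted left by 24.
 */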
1383 static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1384 struct mvumi_cmd *cmd,
1385 struct mvumi_rsp_frame *ob_frame)
1386 {
1387 if (atomic_read(&cmd->sync_cmd)) {
1388 cmd->cmd_status = ob_frame->req_status;
1389
1390 if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
1391 (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
1392 cmd->data_buf) {
1393 memcpy(cmd->data_buf, ob_frame->payload,
1394 sizeof(struct mvumi_sense_data));
1395 }
1396 atomic_dec(&cmd->sync_cmd);
1397 wake_up(&mhba->int_cmd_wait_q);
1398 }
1399 }
1400
1401 static void mvumi_show_event(struct mvumi_hba *mhba,
1402 struct mvumi_driver_event *ptr)
1403 {
1404 unsigned int i;
1405
1406 dev_warn(&mhba->pdev->dev,
1407 "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
1408 ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
1409 if (ptr->param_count) {
1410 printk(KERN_WARNING "Event param(len 0x%x): ",
1411 ptr->param_count);
1412 for (i = 0; i < ptr->param_count; i++)
1413 printk(KERN_WARNING "0x%x ", ptr->params[i]);
1414
1415 printk(KERN_WARNING "\n");
1416 }
1417
1418 if (ptr->sense_data_length) {
1419 printk(KERN_WARNING "Event sense data(len 0x%x): ",
1420 ptr->sense_data_length);
1421 for (i = 0; i < ptr->sense_data_length; i++)
1422 printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
1423 printk(KERN_WARNING "\n");
1424 }
1425 }
1426
1427 static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
1428 {
1429 struct scsi_device *sdev;
1430 int ret = -1;
1431
1432 if (status == DEVICE_OFFLINE) {
1433 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1434 if (sdev) {
1435 dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
1436 sdev->id, 0);
1437 scsi_remove_device(sdev);
1438 scsi_device_put(sdev);
1439 ret = 0;
1440 } else
1441 dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
1442 devid);
1443 } else if (status == DEVICE_ONLINE) {
1444 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1445 if (!sdev) {
1446 scsi_add_device(mhba->shost, 0, devid, 0);
1447 dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
1448 devid, 0);
1449 ret = 0;
1450 } else {
1451 dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
1452 0, devid, 0);
1453 scsi_device_put(sdev);
1454 }
1455 }
1456 return ret;
1457 }
1458
1459 static u64 mvumi_inquiry(struct mvumi_hba *mhba,
1460 unsigned int id, struct mvumi_cmd *cmd)
1461 {
1462 struct mvumi_msg_frame *frame;
1463 u64 wwid = 0;
1464 int cmd_alloc = 0;
1465 int data_buf_len = 64;
1466
1467 if (!cmd) {
1468 cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
1469 if (cmd)
1470 cmd_alloc = 1;
1471 else
1472 return 0;
1473 } else {
1474 memset(cmd->data_buf, 0, data_buf_len);
1475 }
1476 cmd->scmd = NULL;
1477 cmd->cmd_status = REQ_STATUS_PENDING;
1478 atomic_set(&cmd->sync_cmd, 0);
1479 frame = cmd->frame;
1480 frame->device_id = (u16) id;
1481 frame->cmd_flag = CMD_FLAG_DATA_IN;
1482 frame->req_function = CL_FUN_SCSI_CMD;
1483 frame->cdb_length = 6;
1484 frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
1485 memset(frame->cdb, 0, frame->cdb_length);
1486 frame->cdb[0] = INQUIRY;
1487 frame->cdb[4] = frame->data_transfer_length;
1488
1489 mvumi_issue_blocked_cmd(mhba, cmd);
1490
1491 if (cmd->cmd_status == SAM_STAT_GOOD) {
1492 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1493 wwid = id + 1;
1494 else
1495 memcpy((void *)&wwid,
1496 (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
1497 MVUMI_INQUIRY_UUID_LEN);
1498 dev_dbg(&mhba->pdev->dev,
1499 "inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
1500 } else {
1501 wwid = 0;
1502 }
1503 if (cmd_alloc)
1504 mvumi_delete_internal_cmd(mhba, cmd);
1505
1506 return wwid;
1507 }
1508
1509 static void mvumi_detach_devices(struct mvumi_hba *mhba)
1510 {
1511 struct mvumi_device *mv_dev = NULL , *dev_next;
1512 struct scsi_device *sdev = NULL;
1513
1514 mutex_lock(&mhba->device_lock);
1515
1516 /* detach Hard Disk */
1517 list_for_each_entry_safe(mv_dev, dev_next,
1518 &mhba->shost_dev_list, list) {
1519 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1520 list_del_init(&mv_dev->list);
1521 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1522 mv_dev->id, mv_dev->wwid);
1523 kfree(mv_dev);
1524 }
1525 list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
1526 list_del_init(&mv_dev->list);
1527 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1528 mv_dev->id, mv_dev->wwid);
1529 kfree(mv_dev);
1530 }
1531
1532 /* detach virtual device */
1533 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
1534 sdev = scsi_device_lookup(mhba->shost, 0,
1535 mhba->max_target_id - 1, 0);
1536
1537 if (sdev) {
1538 scsi_remove_device(sdev);
1539 scsi_device_put(sdev);
1540 }
1541
1542 mutex_unlock(&mhba->device_lock);
1543 }
1544
1545 static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
1546 {
1547 struct scsi_device *sdev;
1548
1549 sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
1550 if (sdev) {
1551 scsi_rescan_device(&sdev->sdev_gendev);
1552 scsi_device_put(sdev);
1553 }
1554 }
1555
1556 static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
1557 {
1558 struct mvumi_device *mv_dev = NULL;
1559
1560 list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
1561 if (mv_dev->wwid == wwid) {
1562 if (mv_dev->id != id) {
1563 dev_err(&mhba->pdev->dev,
1564 "%s has same wwid[%llx] ,"
1565 " but different id[%d %d]\n",
1566 __func__, mv_dev->wwid, mv_dev->id, id);
1567 return -1;
1568 } else {
1569 if (mhba->pdev->device ==
1570 PCI_DEVICE_ID_MARVELL_MV9143)
1571 mvumi_rescan_devices(mhba, id);
1572 return 1;
1573 }
1574 }
1575 }
1576 return 0;
1577 }
1578
1579 static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
1580 {
1581 struct mvumi_device *mv_dev = NULL, *dev_next;
1582
1583 list_for_each_entry_safe(mv_dev, dev_next,
1584 &mhba->shost_dev_list, list) {
1585 if (mv_dev->id == id) {
1586 dev_dbg(&mhba->pdev->dev,
1587 "detach device(0:%d:0) wwid(%llx) from HOST\n",
1588 mv_dev->id, mv_dev->wwid);
1589 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1590 list_del_init(&mv_dev->list);
1591 kfree(mv_dev);
1592 }
1593 }
1594 }
1595
1596 static int mvumi_probe_devices(struct mvumi_hba *mhba)
1597 {
1598 int id, maxid;
1599 u64 wwid = 0;
1600 struct mvumi_device *mv_dev = NULL;
1601 struct mvumi_cmd *cmd = NULL;
1602 int found = 0;
1603
1604 cmd = mvumi_create_internal_cmd(mhba, 64);
1605 if (!cmd)
1606 return -1;
1607
1608 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1609 maxid = mhba->max_target_id;
1610 else
1611 maxid = mhba->max_target_id - 1;
1612
1613 for (id = 0; id < maxid; id++) {
1614 wwid = mvumi_inquiry(mhba, id, cmd);
1615 if (!wwid) {
1616 /* device did not respond, remove it */
1617 mvumi_remove_devices(mhba, id);
1618 } else {
1619 /* device responded, add it */
1620 found = mvumi_match_devices(mhba, id, wwid);
1621 if (!found) {
1622 mvumi_remove_devices(mhba, id);
1623 mv_dev = kzalloc(sizeof(struct mvumi_device),
1624 GFP_KERNEL);
1625 if (!mv_dev) {
1626 dev_err(&mhba->pdev->dev,
1627 "%s alloc mv_dev failed\n",
1628 __func__);
1629 continue;
1630 }
1631 mv_dev->id = id;
1632 mv_dev->wwid = wwid;
1633 mv_dev->sdev = NULL;
1634 INIT_LIST_HEAD(&mv_dev->list);
1635 list_add_tail(&mv_dev->list,
1636 &mhba->mhba_dev_list);
1637 dev_dbg(&mhba->pdev->dev,
1638 "probe a new device(0:%d:0)"
1639 " wwid(%llx)\n", id, mv_dev->wwid);
1640 } else if (found == -1)
1641 return -1;
1642 else
1643 continue;
1644 }
1645 }
1646
1647 if (cmd)
1648 mvumi_delete_internal_cmd(mhba, cmd);
1649
1650 return 0;
1651 }
1652
1653 static int mvumi_rescan_bus(void *data)
1654 {
1655 int ret = 0;
1656 struct mvumi_hba *mhba = (struct mvumi_hba *) data;
1657 struct mvumi_device *mv_dev = NULL , *dev_next;
1658
1659 while (!kthread_should_stop()) {
1660
1661 set_current_state(TASK_INTERRUPTIBLE);
1662 if (!atomic_read(&mhba->pnp_count))
1663 schedule();
1664 msleep(1000);
1665 atomic_set(&mhba->pnp_count, 0);
1666 __set_current_state(TASK_RUNNING);
1667
1668 mutex_lock(&mhba->device_lock);
1669 ret = mvumi_probe_devices(mhba);
1670 if (!ret) {
1671 list_for_each_entry_safe(mv_dev, dev_next,
1672 &mhba->mhba_dev_list, list) {
1673 if (mvumi_handle_hotplug(mhba, mv_dev->id,
1674 DEVICE_ONLINE)) {
1675 dev_err(&mhba->pdev->dev,
1676 "%s add device(0:%d:0) failed"
1677 "wwid(%llx) has exist\n",
1678 __func__,
1679 mv_dev->id, mv_dev->wwid);
1680 list_del_init(&mv_dev->list);
1681 kfree(mv_dev);
1682 } else {
1683 list_move_tail(&mv_dev->list,
1684 &mhba->shost_dev_list);
1685 }
1686 }
1687 }
1688 mutex_unlock(&mhba->device_lock);
1689 }
1690 return 0;
1691 }
1692
1693 static void mvumi_proc_msg(struct mvumi_hba *mhba,
1694 struct mvumi_hotplug_event *param)
1695 {
1696 u16 size = param->size;
1697 const unsigned long *ar_bitmap;
1698 const unsigned long *re_bitmap;
1699 int index;
1700
1701 if (mhba->fw_flag & MVUMI_FW_ATTACH) {
1702 index = -1;
1703 ar_bitmap = (const unsigned long *) param->bitmap;
1704 re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
1705
1706 mutex_lock(&mhba->sas_discovery_mutex);
1707 do {
1708 index = find_next_zero_bit(ar_bitmap, size, index + 1);
1709 if (index >= size)
1710 break;
1711 mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
1712 } while (1);
1713
1714 index = -1;
1715 do {
1716 index = find_next_zero_bit(re_bitmap, size, index + 1);
1717 if (index >= size)
1718 break;
1719 mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
1720 } while (1);
1721 mutex_unlock(&mhba->sas_discovery_mutex);
1722 }
1723 }
1724
1725 static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1726 {
1727 if (msg == APICDB1_EVENT_GETEVENT) {
1728 int i, count;
1729 struct mvumi_driver_event *param = NULL;
1730 struct mvumi_event_req *er = buffer;
1731 count = er->count;
1732 if (count > MAX_EVENTS_RETURNED) {
1733 dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
1734 " than max event count[0x%x].\n",
1735 count, MAX_EVENTS_RETURNED);
1736 return;
1737 }
1738 for (i = 0; i < count; i++) {
1739 param = &er->events[i];
1740 mvumi_show_event(mhba, param);
1741 }
1742 } else if (msg == APICDB1_HOST_GETEVENT) {
1743 mvumi_proc_msg(mhba, buffer);
1744 }
1745 }
1746
1747 static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
1748 {
1749 struct mvumi_cmd *cmd;
1750 struct mvumi_msg_frame *frame;
1751
1752 cmd = mvumi_create_internal_cmd(mhba, 512);
1753 if (!cmd)
1754 return -1;
1755 cmd->scmd = NULL;
1756 cmd->cmd_status = REQ_STATUS_PENDING;
1757 atomic_set(&cmd->sync_cmd, 0);
1758 frame = cmd->frame;
1759 frame->device_id = 0;
1760 frame->cmd_flag = CMD_FLAG_DATA_IN;
1761 frame->req_function = CL_FUN_SCSI_CMD;
1762 frame->cdb_length = MAX_COMMAND_SIZE;
1763 frame->data_transfer_length = sizeof(struct mvumi_event_req);
1764 memset(frame->cdb, 0, MAX_COMMAND_SIZE);
1765 frame->cdb[0] = APICDB0_EVENT;
1766 frame->cdb[1] = msg;
1767 mvumi_issue_blocked_cmd(mhba, cmd);
1768
1769 if (cmd->cmd_status != SAM_STAT_GOOD)
1770 dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
1771 cmd->cmd_status);
1772 else
1773 mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
1774
1775 mvumi_delete_internal_cmd(mhba, cmd);
1776 return 0;
1777 }
1778
1779 static void mvumi_scan_events(struct work_struct *work)
1780 {
1781 struct mvumi_events_wq *mu_ev =
1782 container_of(work, struct mvumi_events_wq, work_q);
1783
1784 mvumi_get_event(mu_ev->mhba, mu_ev->event);
1785 kfree(mu_ev);
1786 }
1787
1788 static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
1789 {
1790 struct mvumi_events_wq *mu_ev;
1791
1792 while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
1793 if (isr_status & DRBL_BUS_CHANGE) {
1794 atomic_inc(&mhba->pnp_count);
1795 wake_up_process(mhba->dm_thread);
1796 isr_status &= ~(DRBL_BUS_CHANGE);
1797 continue;
1798 }
1799
1800 mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1801 if (mu_ev) {
1802 INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1803 mu_ev->mhba = mhba;
1804 mu_ev->event = APICDB1_EVENT_GETEVENT;
1805 isr_status &= ~(DRBL_EVENT_NOTIFY);
1806 mu_ev->param = NULL;
1807 schedule_work(&mu_ev->work_q);
1808 }
1809 }
1810 }
1811
1812 static void mvumi_handle_clob(struct mvumi_hba *mhba)
1813 {
1814 struct mvumi_rsp_frame *ob_frame;
1815 struct mvumi_cmd *cmd;
1816 struct mvumi_ob_data *pool;
1817
1818 while (!list_empty(&mhba->free_ob_list)) {
1819 pool = list_first_entry(&mhba->free_ob_list,
1820 struct mvumi_ob_data, list);
1821 list_del_init(&pool->list);
1822 list_add_tail(&pool->list, &mhba->ob_data_list);
1823
1824 ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
1825 cmd = mhba->tag_cmd[ob_frame->tag];
1826
1827 atomic_dec(&mhba->fw_outstanding);
1828 mhba->tag_cmd[ob_frame->tag] = 0;
1829 tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
1830 if (cmd->scmd)
1831 mvumi_complete_cmd(mhba, cmd, ob_frame);
1832 else
1833 mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
1834 }
1835 mhba->instancet->fire_cmd(mhba, NULL);
1836 }
1837
1838 static irqreturn_t mvumi_isr_handler(int irq, void *devp)
1839 {
1840 struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
1841 unsigned long flags;
1842
1843 spin_lock_irqsave(mhba->shost->host_lock, flags);
1844 if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
1845 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1846 return IRQ_NONE;
1847 }
1848
1849 if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
1850 if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
1851 mvumi_launch_events(mhba, mhba->isr_status);
1852 if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1853 dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1854 mvumi_handshake(mhba);
1855 }
1856
1857 }
1858
1859 if (mhba->global_isr & mhba->regs->int_comaout)
1860 mvumi_receive_ob_list_entry(mhba);
1861
1862 mhba->global_isr = 0;
1863 mhba->isr_status = 0;
1864 if (mhba->fw_state == FW_STATE_STARTED)
1865 mvumi_handle_clob(mhba);
1866 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1867 return IRQ_HANDLED;
1868 }
1869
1870 static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1871 struct mvumi_cmd *cmd)
1872 {
1873 void *ib_entry;
1874 struct mvumi_msg_frame *ib_frame;
1875 unsigned int frame_len;
1876
1877 ib_frame = cmd->frame;
1878 if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
1879 dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
1880 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1881 }
1882 if (tag_is_empty(&mhba->tag_pool)) {
1883 dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1884 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1885 }
1886 mvumi_get_ib_list_entry(mhba, &ib_entry);
1887
1888 cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1889 cmd->frame->request_id = mhba->io_seq++;
1890 cmd->request_id = cmd->frame->request_id;
1891 mhba->tag_cmd[cmd->frame->tag] = cmd;
1892 frame_len = sizeof(*ib_frame) - 4 +
1893 ib_frame->sg_counts * sizeof(struct mvumi_sgl);
1894 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
1895 struct mvumi_dyn_list_entry *dle;
1896 dle = ib_entry;
1897 dle->src_low_addr =
1898 cpu_to_le32(lower_32_bits(cmd->frame_phys));
1899 dle->src_high_addr =
1900 cpu_to_le32(upper_32_bits(cmd->frame_phys));
1901 dle->if_length = (frame_len >> 2) & 0xFFF;
1902 } else {
1903 memcpy(ib_entry, ib_frame, frame_len);
1904 }
1905 return MV_QUEUE_COMMAND_RESULT_SENT;
1906 }
1907
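/*
 * With HS_CAPABILITY_SUPPORT_DYN_SRC the inbound entry built above holds
 * only the frame's bus address plus its length in 32-bit words, packed
 * into a 12-bit if_length field. A sketch of that packing; the 64-byte
 * frame length is an arbitrary example, not a value from the hardware.
 */
#include <stdio.h>

int main(void)
{
	unsigned int frame_len = 64;				/* bytes */
	unsigned int if_length = (frame_len >> 2) & 0xFFF;	/* dwords */

	printf("frame_len %u bytes -> if_length %u\n", frame_len, if_length);
	return 0;	/* prints "frame_len 64 bytes -> if_length 16" */
}
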
1908 static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1909 {
1910 unsigned short num_of_cl_sent = 0;
1911 unsigned int count;
1912 enum mvumi_qc_result result;
1913
1914 if (cmd)
1915 list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1916 count = mhba->instancet->check_ib_list(mhba);
1917 if (list_empty(&mhba->waiting_req_list) || !count)
1918 return;
1919
1920 do {
1921 cmd = list_first_entry(&mhba->waiting_req_list,
1922 struct mvumi_cmd, queue_pointer);
1923 list_del_init(&cmd->queue_pointer);
1924 result = mvumi_send_command(mhba, cmd);
1925 switch (result) {
1926 case MV_QUEUE_COMMAND_RESULT_SENT:
1927 num_of_cl_sent++;
1928 break;
1929 case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
1930 list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
1931 if (num_of_cl_sent > 0)
1932 mvumi_send_ib_list_entry(mhba);
1933
1934 return;
1935 }
1936 } while (!list_empty(&mhba->waiting_req_list) && count--);
1937
1938 if (num_of_cl_sent > 0)
1939 mvumi_send_ib_list_entry(mhba);
1940 }
1941
1942 /**
1943 * mvumi_enable_intr - Enables interrupts
1944 * @mhba: Adapter soft state
1945 */
1946 static void mvumi_enable_intr(struct mvumi_hba *mhba)
1947 {
1948 unsigned int mask;
1949 struct mvumi_hw_regs *regs = mhba->regs;
1950
1951 iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1952 mask = ioread32(regs->enpointa_mask_reg);
1953 mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
1954 iowrite32(mask, regs->enpointa_mask_reg);
1955 }
1956
1957 /**
1958 * mvumi_disable_intr - Disables interrupts
1959 * @mhba: Adapter soft state
1960 */
1961 static void mvumi_disable_intr(struct mvumi_hba *mhba)
1962 {
1963 unsigned int mask;
1964 struct mvumi_hw_regs *regs = mhba->regs;
1965
1966 iowrite32(0, regs->arm_to_pciea_mask_reg);
1967 mask = ioread32(regs->enpointa_mask_reg);
1968 mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
1969 regs->int_comaerr);
1970 iowrite32(mask, regs->enpointa_mask_reg);
1971 }
1972
1973 static int mvumi_clear_intr(void *extend)
1974 {
1975 struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1976 unsigned int status, isr_status = 0, tmp = 0;
1977 struct mvumi_hw_regs *regs = mhba->regs;
1978
1979 status = ioread32(regs->main_int_cause_reg);
1980 if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
1981 return 1;
1982 if (unlikely(status & regs->int_comaerr)) {
1983 tmp = ioread32(regs->outb_isr_cause);
1984 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
1985 if (tmp & regs->clic_out_err) {
1986 iowrite32(tmp & regs->clic_out_err,
1987 regs->outb_isr_cause);
1988 }
1989 } else {
1990 if (tmp & (regs->clic_in_err | regs->clic_out_err))
1991 iowrite32(tmp & (regs->clic_in_err |
1992 regs->clic_out_err),
1993 regs->outb_isr_cause);
1994 }
1995 status ^= mhba->regs->int_comaerr;
1996 /* inbound or outbound parity error, command will timeout */
1997 }
1998 if (status & regs->int_comaout) {
1999 tmp = ioread32(regs->outb_isr_cause);
2000 if (tmp & regs->clic_irq)
2001 iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
2002 }
2003 if (status & regs->int_dl_cpu2pciea) {
2004 isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
2005 if (isr_status)
2006 iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
2007 }
2008
2009 mhba->global_isr = status;
2010 mhba->isr_status = isr_status;
2011
2012 return 0;
2013 }
2014
2015 /**
2016 * mvumi_read_fw_status_reg - returns the current FW status value
2017 * @mhba: Adapter soft state
2018 */
2019 static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
2020 {
2021 unsigned int status;
2022
2023 status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
2024 if (status)
2025 iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
2026 return status;
2027 }
2028
2029 static struct mvumi_instance_template mvumi_instance_9143 = {
2030 .fire_cmd = mvumi_fire_cmd,
2031 .enable_intr = mvumi_enable_intr,
2032 .disable_intr = mvumi_disable_intr,
2033 .clear_intr = mvumi_clear_intr,
2034 .read_fw_status_reg = mvumi_read_fw_status_reg,
2035 .check_ib_list = mvumi_check_ib_list_9143,
2036 .check_ob_list = mvumi_check_ob_list_9143,
2037 .reset_host = mvumi_reset_host_9143,
2038 };
2039
2040 static struct mvumi_instance_template mvumi_instance_9580 = {
2041 .fire_cmd = mvumi_fire_cmd,
2042 .enable_intr = mvumi_enable_intr,
2043 .disable_intr = mvumi_disable_intr,
2044 .clear_intr = mvumi_clear_intr,
2045 .read_fw_status_reg = mvumi_read_fw_status_reg,
2046 .check_ib_list = mvumi_check_ib_list_9580,
2047 .check_ob_list = mvumi_check_ob_list_9580,
2048 .reset_host = mvumi_reset_host_9580,
2049 };
2050
2051 static int mvumi_slave_configure(struct scsi_device *sdev)
2052 {
2053 struct mvumi_hba *mhba;
2054 unsigned char bitcount = sizeof(unsigned char) * 8;
2055
2056 mhba = (struct mvumi_hba *) sdev->host->hostdata;
2057 if (sdev->id >= mhba->max_target_id)
2058 return -EINVAL;
2059
2060 mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
2061 return 0;
2062 }
2063
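/*
 * target_map above is a byte array used as a bitmap: target id N lives in
 * byte N / 8, bit N % 8. A user-space sketch of the indexing; the id is an
 * arbitrary example.
 */
#include <stdio.h>

int main(void)
{
	unsigned char target_map[16] = { 0 };
	unsigned char bitcount = sizeof(unsigned char) * 8;
	unsigned int id = 21;

	target_map[id / bitcount] |= 1 << (id % bitcount);

	/* id 21 sets bit 5 of byte 2, i.e. 0x20 */
	printf("byte %u = 0x%02x\n", id / bitcount, target_map[id / bitcount]);
	return 0;
}
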
2064 /**
2065 * mvumi_build_frame - Prepares a direct cdb (DCDB) command
2066 * @mhba: Adapter soft state
2067 * @scmd: SCSI command
2068 * @cmd: Command frame to be prepared
2069 *
2070 * This function prepares CDB commands. These are typically pass-through
2071 * commands to the devices.
2072 */
2073 static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
2074 struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
2075 {
2076 struct mvumi_msg_frame *pframe;
2077
2078 cmd->scmd = scmd;
2079 cmd->cmd_status = REQ_STATUS_PENDING;
2080 pframe = cmd->frame;
2081 pframe->device_id = ((unsigned short) scmd->device->id) |
2082 (((unsigned short) scmd->device->lun) << 8);
2083 pframe->cmd_flag = 0;
2084
2085 switch (scmd->sc_data_direction) {
2086 case DMA_NONE:
2087 pframe->cmd_flag |= CMD_FLAG_NON_DATA;
2088 break;
2089 case DMA_FROM_DEVICE:
2090 pframe->cmd_flag |= CMD_FLAG_DATA_IN;
2091 break;
2092 case DMA_TO_DEVICE:
2093 pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
2094 break;
2095 case DMA_BIDIRECTIONAL:
2096 default:
2097 dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
2098 "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
2099 goto error;
2100 }
2101
2102 pframe->cdb_length = scmd->cmd_len;
2103 memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
2104 pframe->req_function = CL_FUN_SCSI_CMD;
2105 if (scsi_bufflen(scmd)) {
2106 if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
2107 &pframe->sg_counts))
2108 goto error;
2109
2110 pframe->data_transfer_length = scsi_bufflen(scmd);
2111 } else {
2112 pframe->sg_counts = 0;
2113 pframe->data_transfer_length = 0;
2114 }
2115 return 0;
2116
2117 error:
2118 scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
2119 SAM_STAT_CHECK_CONDITION;
2120 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
2121 0);
2122 return -1;
2123 }
2124
2125 /**
2126 * mvumi_queue_command - Queue entry point
2127 * @shost: SCSI host to which the command is queued
2128 * @scmd: SCSI command to be queued
2129 */
2130 static int mvumi_queue_command(struct Scsi_Host *shost,
2131 struct scsi_cmnd *scmd)
2132 {
2133 struct mvumi_cmd *cmd;
2134 struct mvumi_hba *mhba;
2135 unsigned long irq_flags;
2136
2137 spin_lock_irqsave(shost->host_lock, irq_flags);
2138 scsi_cmd_get_serial(shost, scmd);
2139
2140 mhba = (struct mvumi_hba *) shost->hostdata;
2141 scmd->result = 0;
2142 cmd = mvumi_get_cmd(mhba);
2143 if (unlikely(!cmd)) {
2144 spin_unlock_irqrestore(shost->host_lock, irq_flags);
2145 return SCSI_MLQUEUE_HOST_BUSY;
2146 }
2147
2148 if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
2149 goto out_return_cmd;
2150
2151 cmd->scmd = scmd;
2152 scmd->SCp.ptr = (char *) cmd;
2153 mhba->instancet->fire_cmd(mhba, cmd);
2154 spin_unlock_irqrestore(shost->host_lock, irq_flags);
2155 return 0;
2156
2157 out_return_cmd:
2158 mvumi_return_cmd(mhba, cmd);
2159 scmd->scsi_done(scmd);
2160 spin_unlock_irqrestore(shost->host_lock, irq_flags);
2161 return 0;
2162 }
2163
2164 static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
2165 {
2166 struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
2167 struct Scsi_Host *host = scmd->device->host;
2168 struct mvumi_hba *mhba = shost_priv(host);
2169 unsigned long flags;
2170
2171 spin_lock_irqsave(mhba->shost->host_lock, flags);
2172
2173 if (mhba->tag_cmd[cmd->frame->tag]) {
2174 mhba->tag_cmd[cmd->frame->tag] = 0;
2175 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
2176 }
2177 if (!list_empty(&cmd->queue_pointer))
2178 list_del_init(&cmd->queue_pointer);
2179 else
2180 atomic_dec(&mhba->fw_outstanding);
2181
2182 scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
2183 scmd->SCp.ptr = NULL;
2184 if (scsi_bufflen(scmd)) {
2185 if (scsi_sg_count(scmd)) {
2186 pci_unmap_sg(mhba->pdev,
2187 scsi_sglist(scmd),
2188 scsi_sg_count(scmd),
2189 (int)scmd->sc_data_direction);
2190 } else {
2191 pci_unmap_single(mhba->pdev,
2192 scmd->SCp.dma_handle,
2193 scsi_bufflen(scmd),
2194 (int)scmd->sc_data_direction);
2195
2196 scmd->SCp.dma_handle = 0;
2197 }
2198 }
2199 mvumi_return_cmd(mhba, cmd);
2200 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
2201
2202 return BLK_EH_NOT_HANDLED;
2203 }
2204
2205 static int
2206 mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2207 sector_t capacity, int geom[])
2208 {
2209 int heads, sectors;
2210 sector_t cylinders;
2211 unsigned long tmp;
2212
2213 heads = 64;
2214 sectors = 32;
2215 tmp = heads * sectors;
2216 cylinders = capacity;
2217 sector_div(cylinders, tmp);
2218
2219 if (capacity >= 0x200000) {
2220 heads = 255;
2221 sectors = 63;
2222 tmp = heads * sectors;
2223 cylinders = capacity;
2224 sector_div(cylinders, tmp);
2225 }
2226 geom[0] = heads;
2227 geom[1] = sectors;
2228 geom[2] = cylinders;
2229
2230 return 0;
2231 }
2232
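/*
 * A user-space sketch of the geometry reported by mvumi_bios_param() above:
 * 64 heads / 32 sectors for small disks, 255 / 63 once the capacity reaches
 * 0x200000 sectors (1 GiB). The capacity below is an arbitrary example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long capacity = 4194304;	/* 512-byte sectors = 2 GiB */
	int heads = 64, sectors = 32;

	if (capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
	}

	/* 4194304 / (255 * 63) = 261 cylinders */
	printf("C/H/S = %llu/%d/%d\n",
	       capacity / (heads * sectors), heads, sectors);
	return 0;
}
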
2233 static struct scsi_host_template mvumi_template = {
2234
2235 .module = THIS_MODULE,
2236 .name = "Marvell Storage Controller",
2237 .slave_configure = mvumi_slave_configure,
2238 .queuecommand = mvumi_queue_command,
2239 .eh_timed_out = mvumi_timed_out,
2240 .eh_host_reset_handler = mvumi_host_reset,
2241 .bios_param = mvumi_bios_param,
2242 .this_id = -1,
2243 };
2244
2245 static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2246 {
2247 void *base = NULL;
2248 struct mvumi_hw_regs *regs;
2249
2250 switch (mhba->pdev->device) {
2251 case PCI_DEVICE_ID_MARVELL_MV9143:
2252 mhba->mmio = mhba->base_addr[0];
2253 base = mhba->mmio;
2254 if (!mhba->regs) {
2255 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2256 if (mhba->regs == NULL)
2257 return -ENOMEM;
2258 }
2259 regs = mhba->regs;
2260
2261 /* For Arm */
2262 regs->ctrl_sts_reg = base + 0x20104;
2263 regs->rstoutn_mask_reg = base + 0x20108;
2264 regs->sys_soft_rst_reg = base + 0x2010C;
2265 regs->main_int_cause_reg = base + 0x20200;
2266 regs->enpointa_mask_reg = base + 0x2020C;
2267 regs->rstoutn_en_reg = base + 0xF1400;
2268 /* For Doorbell */
2269 regs->pciea_to_arm_drbl_reg = base + 0x20400;
2270 regs->arm_to_pciea_drbl_reg = base + 0x20408;
2271 regs->arm_to_pciea_mask_reg = base + 0x2040C;
2272 regs->pciea_to_arm_msg0 = base + 0x20430;
2273 regs->pciea_to_arm_msg1 = base + 0x20434;
2274 regs->arm_to_pciea_msg0 = base + 0x20438;
2275 regs->arm_to_pciea_msg1 = base + 0x2043C;
2276
2277 /* For Message Unit */
2278
2279 regs->inb_aval_count_basel = base + 0x508;
2280 regs->inb_aval_count_baseh = base + 0x50C;
2281 regs->inb_write_pointer = base + 0x518;
2282 regs->inb_read_pointer = base + 0x51C;
2283 regs->outb_coal_cfg = base + 0x568;
2284 regs->outb_copy_basel = base + 0x5B0;
2285 regs->outb_copy_baseh = base + 0x5B4;
2286 regs->outb_copy_pointer = base + 0x544;
2287 regs->outb_read_pointer = base + 0x548;
2288 regs->outb_isr_cause = base + 0x560;
2289 regs->outb_coal_cfg = base + 0x568;
2290 /* Bit setting for HW */
2291 regs->int_comaout = 1 << 8;
2292 regs->int_comaerr = 1 << 6;
2293 regs->int_dl_cpu2pciea = 1 << 1;
2294 regs->cl_pointer_toggle = 1 << 12;
2295 regs->clic_irq = 1 << 1;
2296 regs->clic_in_err = 1 << 8;
2297 regs->clic_out_err = 1 << 12;
2298 regs->cl_slot_num_mask = 0xFFF;
2299 regs->int_drbl_int_mask = 0x3FFFFFFF;
2300 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
2301 regs->int_comaerr;
2302 break;
2303 case PCI_DEVICE_ID_MARVELL_MV9580:
2304 mhba->mmio = mhba->base_addr[2];
2305 base = mhba->mmio;
2306 if (!mhba->regs) {
2307 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2308 if (mhba->regs == NULL)
2309 return -ENOMEM;
2310 }
2311 regs = mhba->regs;
2312 /* For Arm */
2313 regs->ctrl_sts_reg = base + 0x20104;
2314 regs->rstoutn_mask_reg = base + 0x1010C;
2315 regs->sys_soft_rst_reg = base + 0x10108;
2316 regs->main_int_cause_reg = base + 0x10200;
2317 regs->enpointa_mask_reg = base + 0x1020C;
2318 regs->rstoutn_en_reg = base + 0xF1400;
2319
2320 /* For Doorbell */
2321 regs->pciea_to_arm_drbl_reg = base + 0x10460;
2322 regs->arm_to_pciea_drbl_reg = base + 0x10480;
2323 regs->arm_to_pciea_mask_reg = base + 0x10484;
2324 regs->pciea_to_arm_msg0 = base + 0x10400;
2325 regs->pciea_to_arm_msg1 = base + 0x10404;
2326 regs->arm_to_pciea_msg0 = base + 0x10420;
2327 regs->arm_to_pciea_msg1 = base + 0x10424;
2328
2329 /* For reset*/
2330 regs->reset_request = base + 0x10108;
2331 regs->reset_enable = base + 0x1010c;
2332
2333 /* For Message Unit */
2334 regs->inb_aval_count_basel = base + 0x4008;
2335 regs->inb_aval_count_baseh = base + 0x400C;
2336 regs->inb_write_pointer = base + 0x4018;
2337 regs->inb_read_pointer = base + 0x401C;
2338 regs->outb_copy_basel = base + 0x4058;
2339 regs->outb_copy_baseh = base + 0x405C;
2340 regs->outb_copy_pointer = base + 0x406C;
2341 regs->outb_read_pointer = base + 0x4070;
2342 regs->outb_coal_cfg = base + 0x4080;
2343 regs->outb_isr_cause = base + 0x4088;
2344 /* Bit setting for HW */
2345 regs->int_comaout = 1 << 4;
2346 regs->int_dl_cpu2pciea = 1 << 12;
2347 regs->int_comaerr = 1 << 29;
2348 regs->cl_pointer_toggle = 1 << 14;
2349 regs->cl_slot_num_mask = 0x3FFF;
2350 regs->clic_irq = 1 << 0;
2351 regs->clic_out_err = 1 << 1;
2352 regs->int_drbl_int_mask = 0x3FFFFFFF;
2353 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
2354 break;
2355 default:
2356 return -1;
2357 break;
2358 }
2359
2360 return 0;
2361 }
2362
2363 /**
2364 * mvumi_init_fw - Initializes the FW
2365 * @mhba: Adapter soft state
2366 *
2367 * This is the main function for initializing firmware.
2368 */
2369 static int mvumi_init_fw(struct mvumi_hba *mhba)
2370 {
2371 int ret = 0;
2372
2373 if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
2374 dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
2375 return -EBUSY;
2376 }
2377 ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2378 if (ret)
2379 goto fail_ioremap;
2380
2381 switch (mhba->pdev->device) {
2382 case PCI_DEVICE_ID_MARVELL_MV9143:
2383 mhba->instancet = &mvumi_instance_9143;
2384 mhba->io_seq = 0;
2385 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2386 mhba->request_id_enabled = 1;
2387 break;
2388 case PCI_DEVICE_ID_MARVELL_MV9580:
2389 mhba->instancet = &mvumi_instance_9580;
2390 mhba->io_seq = 0;
2391 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2392 break;
2393 default:
2394 dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
2395 mhba->pdev->device);
2396 mhba->instancet = NULL;
2397 ret = -EINVAL;
2398 goto fail_alloc_mem;
2399 }
2400 dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
2401 mhba->pdev->device);
2402 ret = mvumi_cfg_hw_reg(mhba);
2403 if (ret) {
2404 dev_err(&mhba->pdev->dev,
2405 "failed to allocate memory for reg\n");
2406 ret = -ENOMEM;
2407 goto fail_alloc_mem;
2408 }
2409 mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
2410 &mhba->handshake_page_phys);
2411 if (!mhba->handshake_page) {
2412 dev_err(&mhba->pdev->dev,
2413 "failed to allocate memory for handshake\n");
2414 ret = -ENOMEM;
2415 goto fail_alloc_page;
2416 }
2417
2418 if (mvumi_start(mhba)) {
2419 ret = -EINVAL;
2420 goto fail_ready_state;
2421 }
2422 ret = mvumi_alloc_cmds(mhba);
2423 if (ret)
2424 goto fail_ready_state;
2425
2426 return 0;
2427
2428 fail_ready_state:
2429 mvumi_release_mem_resource(mhba);
2430 pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
2431 mhba->handshake_page, mhba->handshake_page_phys);
2432 fail_alloc_page:
2433 kfree(mhba->regs);
2434 fail_alloc_mem:
2435 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
2436 fail_ioremap:
2437 pci_release_regions(mhba->pdev);
2438
2439 return ret;
2440 }
2441
2442 /**
2443 * mvumi_io_attach - Attaches this driver to SCSI mid-layer
2444 * @mhba: Adapter soft state
2445 */
2446 static int mvumi_io_attach(struct mvumi_hba *mhba)
2447 {
2448 struct Scsi_Host *host = mhba->shost;
2449 struct scsi_device *sdev = NULL;
2450 int ret;
2451 unsigned int max_sg = (mhba->ib_max_size + 4 -
2452 sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
2453
2454 host->irq = mhba->pdev->irq;
2455 host->unique_id = mhba->unique_id;
2456 host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2457 host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
2458 host->max_sectors = mhba->max_transfer_size / 512;
2459 host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2460 host->max_id = mhba->max_target_id;
2461 host->max_cmd_len = MAX_COMMAND_SIZE;
2462
2463 ret = scsi_add_host(host, &mhba->pdev->dev);
2464 if (ret) {
2465 dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
2466 return ret;
2467 }
2468 mhba->fw_flag |= MVUMI_FW_ATTACH;
2469
2470 mutex_lock(&mhba->sas_discovery_mutex);
2471 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2472 ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2473 else
2474 ret = 0;
2475 if (ret) {
2476 dev_err(&mhba->pdev->dev, "add virtual device failed\n");
2477 mutex_unlock(&mhba->sas_discovery_mutex);
2478 goto fail_add_device;
2479 }
2480
2481 mhba->dm_thread = kthread_create(mvumi_rescan_bus,
2482 mhba, "mvumi_scanthread");
2483 if (IS_ERR(mhba->dm_thread)) {
2484 dev_err(&mhba->pdev->dev,
2485 "failed to create device scan thread\n");
2486 mutex_unlock(&mhba->sas_discovery_mutex);
2487 goto fail_create_thread;
2488 }
2489 atomic_set(&mhba->pnp_count, 1);
2490 wake_up_process(mhba->dm_thread);
2491
2492 mutex_unlock(&mhba->sas_discovery_mutex);
2493 return 0;
2494
2495 fail_create_thread:
2496 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2497 sdev = scsi_device_lookup(mhba->shost, 0,
2498 mhba->max_target_id - 1, 0);
2499 if (sdev) {
2500 scsi_remove_device(sdev);
2501 scsi_device_put(sdev);
2502 }
2503 fail_add_device:
2504 scsi_remove_host(mhba->shost);
2505 return ret;
2506 }
2507
2508 /**
2509 * mvumi_probe_one - PCI hotplug entry point
2510 * @pdev: PCI device structure
2511 * @id: PCI ids of supported hotplugged adapter
2512 */
2513 static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2514 {
2515 struct Scsi_Host *host;
2516 struct mvumi_hba *mhba;
2517 int ret;
2518
2519 dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
2520 pdev->vendor, pdev->device, pdev->subsystem_vendor,
2521 pdev->subsystem_device);
2522
2523 ret = pci_enable_device(pdev);
2524 if (ret)
2525 return ret;
2526
2527 pci_set_master(pdev);
2528
2529 if (IS_DMA64) {
2530 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2531 if (ret) {
2532 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2533 if (ret)
2534 goto fail_set_dma_mask;
2535 }
2536 } else {
2537 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2538 if (ret)
2539 goto fail_set_dma_mask;
2540 }
2541
2542 host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
2543 if (!host) {
2544 dev_err(&pdev->dev, "scsi_host_alloc failed\n");
2545 ret = -ENOMEM;
2546 goto fail_alloc_instance;
2547 }
2548 mhba = shost_priv(host);
2549
2550 INIT_LIST_HEAD(&mhba->cmd_pool);
2551 INIT_LIST_HEAD(&mhba->ob_data_list);
2552 INIT_LIST_HEAD(&mhba->free_ob_list);
2553 INIT_LIST_HEAD(&mhba->res_list);
2554 INIT_LIST_HEAD(&mhba->waiting_req_list);
2555 mutex_init(&mhba->device_lock);
2556 INIT_LIST_HEAD(&mhba->mhba_dev_list);
2557 INIT_LIST_HEAD(&mhba->shost_dev_list);
2558 atomic_set(&mhba->fw_outstanding, 0);
2559 init_waitqueue_head(&mhba->int_cmd_wait_q);
2560 mutex_init(&mhba->sas_discovery_mutex);
2561
2562 mhba->pdev = pdev;
2563 mhba->shost = host;
2564 mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
2565
2566 ret = mvumi_init_fw(mhba);
2567 if (ret)
2568 goto fail_init_fw;
2569
2570 ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2571 "mvumi", mhba);
2572 if (ret) {
2573 dev_err(&pdev->dev, "failed to register IRQ\n");
2574 goto fail_init_irq;
2575 }
2576
2577 mhba->instancet->enable_intr(mhba);
2578 pci_set_drvdata(pdev, mhba);
2579
2580 ret = mvumi_io_attach(mhba);
2581 if (ret)
2582 goto fail_io_attach;
2583
2584 mvumi_backup_bar_addr(mhba);
2585 dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
2586
2587 return 0;
2588
2589 fail_io_attach:
2590 mhba->instancet->disable_intr(mhba);
2591 free_irq(mhba->pdev->irq, mhba);
2592 fail_init_irq:
2593 mvumi_release_fw(mhba);
2594 fail_init_fw:
2595 scsi_host_put(host);
2596
2597 fail_alloc_instance:
2598 fail_set_dma_mask:
2599 pci_disable_device(pdev);
2600
2601 return ret;
2602 }
2603
2604 static void mvumi_detach_one(struct pci_dev *pdev)
2605 {
2606 struct Scsi_Host *host;
2607 struct mvumi_hba *mhba;
2608
2609 mhba = pci_get_drvdata(pdev);
2610 if (mhba->dm_thread) {
2611 kthread_stop(mhba->dm_thread);
2612 mhba->dm_thread = NULL;
2613 }
2614
2615 mvumi_detach_devices(mhba);
2616 host = mhba->shost;
2617 scsi_remove_host(mhba->shost);
2618 mvumi_flush_cache(mhba);
2619
2620 mhba->instancet->disable_intr(mhba);
2621 free_irq(mhba->pdev->irq, mhba);
2622 mvumi_release_fw(mhba);
2623 scsi_host_put(host);
2624 pci_disable_device(pdev);
2625 dev_dbg(&pdev->dev, "driver is removed!\n");
2626 }
2627
2628 /**
2629 * mvumi_shutdown - Shutdown entry point
2630 * @pdev: PCI device structure
2631 */
2632 static void mvumi_shutdown(struct pci_dev *pdev)
2633 {
2634 struct mvumi_hba *mhba = pci_get_drvdata(pdev);
2635
2636 mvumi_flush_cache(mhba);
2637 }
2638
2639 static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
2640 {
2641 struct mvumi_hba *mhba = NULL;
2642
2643 mhba = pci_get_drvdata(pdev);
2644 mvumi_flush_cache(mhba);
2645
2646 pci_set_drvdata(pdev, mhba);
2647 mhba->instancet->disable_intr(mhba);
2648 free_irq(mhba->pdev->irq, mhba);
2649 mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2650 pci_release_regions(pdev);
2651 pci_save_state(pdev);
2652 pci_disable_device(pdev);
2653 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2654
2655 return 0;
2656 }
2657
2658 static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
2659 {
2660 int ret;
2661 struct mvumi_hba *mhba = NULL;
2662
2663 mhba = pci_get_drvdata(pdev);
2664
2665 pci_set_power_state(pdev, PCI_D0);
2666 pci_enable_wake(pdev, PCI_D0, 0);
2667 pci_restore_state(pdev);
2668
2669 ret = pci_enable_device(pdev);
2670 if (ret) {
2671 dev_err(&pdev->dev, "enable device failed\n");
2672 return ret;
2673 }
2674 pci_set_master(pdev);
2675 if (IS_DMA64) {
2676 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2677 if (ret) {
2678 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2679 if (ret)
2680 goto fail;
2681 }
2682 } else {
2683 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2684 if (ret)
2685 goto fail;
2686 }
2687 ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
2688 if (ret)
2689 goto fail;
2690 ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2691 if (ret)
2692 goto release_regions;
2693
2694 if (mvumi_cfg_hw_reg(mhba)) {
2695 ret = -EINVAL;
2696 goto unmap_pci_addr;
2697 }
2698
2699 mhba->mmio = mhba->base_addr[0];
2700 mvumi_reset(mhba);
2701
2702 if (mvumi_start(mhba)) {
2703 ret = -EINVAL;
2704 goto unmap_pci_addr;
2705 }
2706
2707 ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2708 "mvumi", mhba);
2709 if (ret) {
2710 dev_err(&pdev->dev, "failed to register IRQ\n");
2711 goto unmap_pci_addr;
2712 }
2713 mhba->instancet->enable_intr(mhba);
2714
2715 return 0;
2716
2717 unmap_pci_addr:
2718 mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2719 release_regions:
2720 pci_release_regions(pdev);
2721 fail:
2722 pci_disable_device(pdev);
2723
2724 return ret;
2725 }
2726
2727 static struct pci_driver mvumi_pci_driver = {
2728
2729 .name = MV_DRIVER_NAME,
2730 .id_table = mvumi_pci_table,
2731 .probe = mvumi_probe_one,
2732 .remove = mvumi_detach_one,
2733 .shutdown = mvumi_shutdown,
2734 #ifdef CONFIG_PM
2735 .suspend = mvumi_suspend,
2736 .resume = mvumi_resume,
2737 #endif
2738 };
2739
2740 /**
2741 * mvumi_init - Driver load entry point
2742 */
2743 static int __init mvumi_init(void)
2744 {
2745 return pci_register_driver(&mvumi_pci_driver);
2746 }
2747
2748 /**
2749 * mvumi_exit - Driver unload entry point
2750 */
2751 static void __exit mvumi_exit(void)
2752 {
2753
2754 pci_unregister_driver(&mvumi_pci_driver);
2755 }
2756
2757 module_init(mvumi_init);
2758 module_exit(mvumi_exit);
2759
2760
2761
2762
2763
2764 /* LDV_COMMENT_BEGIN_MAIN */
2765 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
2766
2767 /*###########################################################################*/
2768
2769 /*############## Driver Environment Generator 0.2 output ####################*/
2770
2771 /*###########################################################################*/
2772
2773
2774
2775 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Checks that all kernel resources are correctly released by the driver before the driver is unloaded. */
2776 void ldv_check_final_state(void);
2777
2778 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
2779 void ldv_check_return_value(int res);
2780
2781 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
2782 void ldv_check_return_value_probe(int res);
2783
2784 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
2785 void ldv_initialize(void);
2786
2787 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
2788 void ldv_handler_precall(void);
2789
2790 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
2791 int nondet_int(void);
2792
2793 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
2794 int LDV_IN_INTERRUPT;
2795
2796 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
2797 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
2798
2799
2800
2801 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
2802 /*============================= VARIABLE DECLARATION PART =============================*/
2803 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
2804 /* content: static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)*/
2805 /* LDV_COMMENT_END_PREP */
2806 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_fire_cmd" */
2807 struct mvumi_hba * var_group1;
2808 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_fire_cmd" */
2809 struct mvumi_cmd * var_group2;
2810 /* LDV_COMMENT_BEGIN_PREP */
2811 #ifdef CONFIG_PM
2812 #endif
2813 /* LDV_COMMENT_END_PREP */
2814 /* content: static void mvumi_enable_intr(struct mvumi_hba *mhba)*/
2815 /* LDV_COMMENT_END_PREP */
2816 /* LDV_COMMENT_BEGIN_PREP */
2817 #ifdef CONFIG_PM
2818 #endif
2819 /* LDV_COMMENT_END_PREP */
2820 /* content: static void mvumi_disable_intr(struct mvumi_hba *mhba)*/
2821 /* LDV_COMMENT_END_PREP */
2822 /* LDV_COMMENT_BEGIN_PREP */
2823 #ifdef CONFIG_PM
2824 #endif
2825 /* LDV_COMMENT_END_PREP */
2826 /* content: static int mvumi_clear_intr(void *extend)*/
2827 /* LDV_COMMENT_END_PREP */
2828 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_clear_intr" */
2829 void * var_mvumi_clear_intr_66_p0;
2830 /* LDV_COMMENT_BEGIN_PREP */
2831 #ifdef CONFIG_PM
2832 #endif
2833 /* LDV_COMMENT_END_PREP */
2834 /* content: static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)*/
2835 /* LDV_COMMENT_END_PREP */
2836 /* LDV_COMMENT_BEGIN_PREP */
2837 #ifdef CONFIG_PM
2838 #endif
2839 /* LDV_COMMENT_END_PREP */
2840 /* content: static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)*/
2841 /* LDV_COMMENT_END_PREP */
2842 /* LDV_COMMENT_BEGIN_PREP */
2843 #ifdef CONFIG_PM
2844 #endif
2845 /* LDV_COMMENT_END_PREP */
2846 /* content: static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba, unsigned int *cur_obf, unsigned int *assign_obf_end)*/
2847 /* LDV_COMMENT_END_PREP */
2848 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_check_ob_list_9143" */
2849 unsigned int * var_mvumi_check_ob_list_9143_21_p1;
2850 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_check_ob_list_9143" */
2851 unsigned int * var_mvumi_check_ob_list_9143_21_p2;
2852 /* LDV_COMMENT_BEGIN_PREP */
2853 #ifdef CONFIG_PM
2854 #endif
2855 /* LDV_COMMENT_END_PREP */
2856 /* content: static int mvumi_reset_host_9143(struct mvumi_hba *mhba)*/
2857 /* LDV_COMMENT_END_PREP */
2858 /* LDV_COMMENT_BEGIN_PREP */
2859 #ifdef CONFIG_PM
2860 #endif
2861 /* LDV_COMMENT_END_PREP */
2862
2863 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
2864 /* content: static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)*/
2865 /* LDV_COMMENT_END_PREP */
2866 /* LDV_COMMENT_BEGIN_PREP */
2867 #ifdef CONFIG_PM
2868 #endif
2869 /* LDV_COMMENT_END_PREP */
2870 /* content: static void mvumi_enable_intr(struct mvumi_hba *mhba)*/
2871 /* LDV_COMMENT_END_PREP */
2872 /* LDV_COMMENT_BEGIN_PREP */
2873 #ifdef CONFIG_PM
2874 #endif
2875 /* LDV_COMMENT_END_PREP */
2876 /* content: static void mvumi_disable_intr(struct mvumi_hba *mhba)*/
2877 /* LDV_COMMENT_END_PREP */
2878 /* LDV_COMMENT_BEGIN_PREP */
2879 #ifdef CONFIG_PM
2880 #endif
2881 /* LDV_COMMENT_END_PREP */
2882 /* content: static int mvumi_clear_intr(void *extend)*/
2883 /* LDV_COMMENT_END_PREP */
2884 /* LDV_COMMENT_BEGIN_PREP */
2885 #ifdef CONFIG_PM
2886 #endif
2887 /* LDV_COMMENT_END_PREP */
2888 /* content: static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)*/
2889 /* LDV_COMMENT_END_PREP */
2890 /* LDV_COMMENT_BEGIN_PREP */
2891 #ifdef CONFIG_PM
2892 #endif
2893 /* LDV_COMMENT_END_PREP */
2894 /* content: static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)*/
2895 /* LDV_COMMENT_END_PREP */
2896 /* LDV_COMMENT_BEGIN_PREP */
2897 #ifdef CONFIG_PM
2898 #endif
2899 /* LDV_COMMENT_END_PREP */
2900 /* content: static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba, unsigned int *cur_obf, unsigned int *assign_obf_end)*/
2901 /* LDV_COMMENT_END_PREP */
2902 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_check_ob_list_9580" */
2903 unsigned int * var_mvumi_check_ob_list_9580_22_p1;
2904 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_check_ob_list_9580" */
2905 unsigned int * var_mvumi_check_ob_list_9580_22_p2;
2906 /* LDV_COMMENT_BEGIN_PREP */
2907 #ifdef CONFIG_PM
2908 #endif
2909 /* LDV_COMMENT_END_PREP */
2910 /* content: static int mvumi_reset_host_9580(struct mvumi_hba *mhba)*/
2911 /* LDV_COMMENT_END_PREP */
2912 /* LDV_COMMENT_BEGIN_PREP */
2913 #ifdef CONFIG_PM
2914 #endif
2915 /* LDV_COMMENT_END_PREP */
2916
2917 /** STRUCT: struct type: scsi_host_template, struct name: mvumi_template **/
2918 /* content: static int mvumi_slave_configure(struct scsi_device *sdev)*/
2919 /* LDV_COMMENT_END_PREP */
2920 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_slave_configure" */
2921 struct scsi_device * var_group3;
2922 /* LDV_COMMENT_BEGIN_PREP */
2923 #ifdef CONFIG_PM
2924 #endif
2925 /* LDV_COMMENT_END_PREP */
2926 /* content: static int mvumi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)*/
2927 /* LDV_COMMENT_END_PREP */
2928 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_queue_command" */
2929 struct Scsi_Host * var_group4;
2930 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_queue_command" */
2931 struct scsi_cmnd * var_group5;
2932 /* LDV_COMMENT_BEGIN_PREP */
2933 #ifdef CONFIG_PM
2934 #endif
2935 /* LDV_COMMENT_END_PREP */
2936 /* content: static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)*/
2937 /* LDV_COMMENT_END_PREP */
2938 /* LDV_COMMENT_BEGIN_PREP */
2939 #ifdef CONFIG_PM
2940 #endif
2941 /* LDV_COMMENT_END_PREP */
2942 /* content: static int mvumi_host_reset(struct scsi_cmnd *scmd)*/
2943 /* LDV_COMMENT_END_PREP */
2944 /* LDV_COMMENT_BEGIN_PREP */
2945 #ifdef CONFIG_PM
2946 #endif
2947 /* LDV_COMMENT_END_PREP */
2948
2949 /** STRUCT: struct type: pci_driver, struct name: mvumi_pci_driver **/
2950 /* content: static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)*/
2951 /* LDV_COMMENT_END_PREP */
2952 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_probe_one" */
2953 struct pci_dev * var_group6;
2954 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_probe_one" */
2955 const struct pci_device_id * var_mvumi_probe_one_75_p1;
2956 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mvumi_probe_one" */
2957 static int res_mvumi_probe_one_75;
2958 /* LDV_COMMENT_BEGIN_PREP */
2959 #ifdef CONFIG_PM
2960 #endif
2961 /* LDV_COMMENT_END_PREP */
2962 /* content: static void mvumi_detach_one(struct pci_dev *pdev)*/
2963 /* LDV_COMMENT_END_PREP */
2964 /* LDV_COMMENT_BEGIN_PREP */
2965 #ifdef CONFIG_PM
2966 #endif
2967 /* LDV_COMMENT_END_PREP */
2968 /* content: static void mvumi_shutdown(struct pci_dev *pdev)*/
2969 /* LDV_COMMENT_END_PREP */
2970 /* LDV_COMMENT_BEGIN_PREP */
2971 #ifdef CONFIG_PM
2972 #endif
2973 /* LDV_COMMENT_END_PREP */
2974 /* content: static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)*/
2975 /* LDV_COMMENT_END_PREP */
2976 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_suspend" */
2977 pm_message_t var_mvumi_suspend_78_p1;
2978 /* LDV_COMMENT_BEGIN_PREP */
2979 #ifdef CONFIG_PM
2980 #endif
2981 /* LDV_COMMENT_END_PREP */
2982 /* content: static int __maybe_unused mvumi_resume(struct pci_dev *pdev)*/
2983 /* LDV_COMMENT_END_PREP */
2984 /* LDV_COMMENT_BEGIN_PREP */
2985 #ifdef CONFIG_PM
2986 #endif
2987 /* LDV_COMMENT_END_PREP */
2988
2989 /** CALLBACK SECTION request_irq **/
2990 /* content: static irqreturn_t mvumi_isr_handler(int irq, void *devp)*/
2991 /* LDV_COMMENT_END_PREP */
2992 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_isr_handler" */
2993 int var_mvumi_isr_handler_61_p0;
2994 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mvumi_isr_handler" */
2995 void * var_mvumi_isr_handler_61_p1;
2996 /* LDV_COMMENT_BEGIN_PREP */
2997 #ifdef CONFIG_PM
2998 #endif
2999 /* LDV_COMMENT_END_PREP */
3000
3001
3002
3003
3004 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
3005 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
3006 /*============================= VARIABLE INITIALIZING PART =============================*/
3007 LDV_IN_INTERRUPT=1;
3008
3009
3010
3011
3012 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
3013 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
3014 /*============================= FUNCTION CALL SECTION =============================*/
3015 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
3016 ldv_initialize();
3017
3018 /** INIT: init_type: ST_MODULE_INIT **/
3019 /* content: static int __init mvumi_init(void)*/
3020 /* LDV_COMMENT_BEGIN_PREP */
3021 #ifdef CONFIG_PM
3022 #endif
3023 /* LDV_COMMENT_END_PREP */
3024 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver init function after the driver is loaded into the kernel. This function is declared as "MODULE_INIT(function name)". */
3025 ldv_handler_precall();
3026 if(mvumi_init())
3027 goto ldv_final;
3028
3029
3030
3031
3032
3033
3034 int ldv_s_mvumi_pci_driver_pci_driver = 0;
3035
3036
3037
3038
3039
3040 while( nondet_int()
3041 || !(ldv_s_mvumi_pci_driver_pci_driver == 0)
3042 ) {
3043
3044 switch(nondet_int()) {
3045
3046 case 0: {
3047
3048 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3049
3050
3051 /* content: static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)*/
3052 /* LDV_COMMENT_END_PREP */
3053 /* LDV_COMMENT_FUNCTION_CALL Function from field "fire_cmd" from driver structure with callbacks "mvumi_instance_9143" */
3054 ldv_handler_precall();
3055 mvumi_fire_cmd( var_group1, var_group2);
3056 /* LDV_COMMENT_BEGIN_PREP */
3057 #ifdef CONFIG_PM
3058 #endif
3059 /* LDV_COMMENT_END_PREP */
3060
3061
3062
3063
3064 }
3065
3066 break;
3067 case 1: {
3068
3069 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3070
3071
3072 /* content: static void mvumi_enable_intr(struct mvumi_hba *mhba)*/
3073 /* LDV_COMMENT_END_PREP */
3074 /* LDV_COMMENT_FUNCTION_CALL Function from field "enable_intr" from driver structure with callbacks "mvumi_instance_9143" */
3075 ldv_handler_precall();
3076 mvumi_enable_intr( var_group1);
3077 /* LDV_COMMENT_BEGIN_PREP */
3078 #ifdef CONFIG_PM
3079 #endif
3080 /* LDV_COMMENT_END_PREP */
3081
3082
3083
3084
3085 }
3086
3087 break;
3088 case 2: {
3089
3090 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3091
3092
3093 /* content: static void mvumi_disable_intr(struct mvumi_hba *mhba)*/
3094 /* LDV_COMMENT_END_PREP */
3095 /* LDV_COMMENT_FUNCTION_CALL Function from field "disable_intr" from driver structure with callbacks "mvumi_instance_9143" */
3096 ldv_handler_precall();
3097 mvumi_disable_intr( var_group1);
3098 /* LDV_COMMENT_BEGIN_PREP */
3099 #ifdef CONFIG_PM
3100 #endif
3101 /* LDV_COMMENT_END_PREP */
3102
3103
3104
3105
3106 }
3107
3108 break;
3109 case 3: {
3110
3111 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3112
3113
3114 /* content: static int mvumi_clear_intr(void *extend)*/
3115 /* LDV_COMMENT_END_PREP */
3116 /* LDV_COMMENT_FUNCTION_CALL Function from field "clear_intr" from driver structure with callbacks "mvumi_instance_9143" */
3117 ldv_handler_precall();
3118 mvumi_clear_intr( var_mvumi_clear_intr_66_p0);
3119 /* LDV_COMMENT_BEGIN_PREP */
3120 #ifdef CONFIG_PM
3121 #endif
3122 /* LDV_COMMENT_END_PREP */
3123
3124
3125
3126
3127 }
3128
3129 break;
3130 case 4: {
3131
3132 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3133
3134
3135 /* content: static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)*/
3136 /* LDV_COMMENT_END_PREP */
3137 /* LDV_COMMENT_FUNCTION_CALL Function from field "read_fw_status_reg" from driver structure with callbacks "mvumi_instance_9143" */
3138 ldv_handler_precall();
3139 mvumi_read_fw_status_reg( var_group1);
3140 /* LDV_COMMENT_BEGIN_PREP */
3141 #ifdef CONFIG_PM
3142 #endif
3143 /* LDV_COMMENT_END_PREP */
3144
3145
3146
3147
3148 }
3149
3150 break;
3151 case 5: {
3152
3153 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3154
3155
3156 /* content: static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)*/
3157 /* LDV_COMMENT_END_PREP */
3158 /* LDV_COMMENT_FUNCTION_CALL Function from field "check_ib_list" from driver structure with callbacks "mvumi_instance_9143" */
3159 ldv_handler_precall();
3160 mvumi_check_ib_list_9143( var_group1);
3161 /* LDV_COMMENT_BEGIN_PREP */
3162 #ifdef CONFIG_PM
3163 #endif
3164 /* LDV_COMMENT_END_PREP */
3165
3166
3167
3168
3169 }
3170
3171 break;
3172 case 6: {
3173
3174 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3175
3176
3177 /* content: static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba, unsigned int *cur_obf, unsigned int *assign_obf_end)*/
3178 /* LDV_COMMENT_END_PREP */
3179 /* LDV_COMMENT_FUNCTION_CALL Function from field "check_ob_list" from driver structure with callbacks "mvumi_instance_9143" */
3180 ldv_handler_precall();
3181 mvumi_check_ob_list_9143( var_group1, var_mvumi_check_ob_list_9143_21_p1, var_mvumi_check_ob_list_9143_21_p2);
3182 /* LDV_COMMENT_BEGIN_PREP */
3183 #ifdef CONFIG_PM
3184 #endif
3185 /* LDV_COMMENT_END_PREP */
3186
3187
3188
3189
3190 }
3191
3192 break;
3193 case 7: {
3194
3195 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9143 **/
3196
3197
3198 /* content: static int mvumi_reset_host_9143(struct mvumi_hba *mhba)*/
3199 /* LDV_COMMENT_END_PREP */
3200 /* LDV_COMMENT_FUNCTION_CALL Function from field "reset_host" from driver structure with callbacks "mvumi_instance_9143" */
3201 ldv_handler_precall();
3202 mvumi_reset_host_9143( var_group1);
3203 /* LDV_COMMENT_BEGIN_PREP */
3204 #ifdef CONFIG_PM
3205 #endif
3206 /* LDV_COMMENT_END_PREP */
3207
3208
3209
3210
3211 }
3212
3213 break;
3214 case 8: {
3215
3216 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3217
3218
3219 /* content: static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)*/
3220 /* LDV_COMMENT_END_PREP */
3221 /* LDV_COMMENT_FUNCTION_CALL Function from field "fire_cmd" from driver structure with callbacks "mvumi_instance_9580" */
3222 ldv_handler_precall();
3223 mvumi_fire_cmd( var_group1, var_group2);
3224 /* LDV_COMMENT_BEGIN_PREP */
3225 #ifdef CONFIG_PM
3226 #endif
3227 /* LDV_COMMENT_END_PREP */
3228
3229
3230
3231
3232 }
3233
3234 break;
3235 case 9: {
3236
3237 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3238
3239
3240 /* content: static void mvumi_enable_intr(struct mvumi_hba *mhba)*/
3241 /* LDV_COMMENT_END_PREP */
3242 /* LDV_COMMENT_FUNCTION_CALL Function from field "enable_intr" from driver structure with callbacks "mvumi_instance_9580" */
3243 ldv_handler_precall();
3244 mvumi_enable_intr( var_group1);
3245 /* LDV_COMMENT_BEGIN_PREP */
3246 #ifdef CONFIG_PM
3247 #endif
3248 /* LDV_COMMENT_END_PREP */
3249
3250
3251
3252
3253 }
3254
3255 break;
3256 case 10: {
3257
3258 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3259
3260
3261 /* content: static void mvumi_disable_intr(struct mvumi_hba *mhba)*/
3262 /* LDV_COMMENT_END_PREP */
3263 /* LDV_COMMENT_FUNCTION_CALL Function from field "disable_intr" from driver structure with callbacks "mvumi_instance_9580" */
3264 ldv_handler_precall();
3265 mvumi_disable_intr( var_group1);
3266 /* LDV_COMMENT_BEGIN_PREP */
3267 #ifdef CONFIG_PM
3268 #endif
3269 /* LDV_COMMENT_END_PREP */
3270
3271
3272
3273
3274 }
3275
3276 break;
3277 case 11: {
3278
3279 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3280
3281
3282 /* content: static int mvumi_clear_intr(void *extend)*/
3283 /* LDV_COMMENT_END_PREP */
3284 /* LDV_COMMENT_FUNCTION_CALL Function from field "clear_intr" from driver structure with callbacks "mvumi_instance_9580" */
3285 ldv_handler_precall();
3286 mvumi_clear_intr( var_mvumi_clear_intr_66_p0);
3287 /* LDV_COMMENT_BEGIN_PREP */
3288 #ifdef CONFIG_PM
3289 #endif
3290 /* LDV_COMMENT_END_PREP */
3291
3292
3293
3294
3295 }
3296
3297 break;
3298 case 12: {
3299
3300 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3301
3302
3303 /* content: static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)*/
3304 /* LDV_COMMENT_END_PREP */
3305 /* LDV_COMMENT_FUNCTION_CALL Function from field "read_fw_status_reg" from driver structure with callbacks "mvumi_instance_9580" */
3306 ldv_handler_precall();
3307 mvumi_read_fw_status_reg( var_group1);
3308 /* LDV_COMMENT_BEGIN_PREP */
3309 #ifdef CONFIG_PM
3310 #endif
3311 /* LDV_COMMENT_END_PREP */
3312
3313
3314
3315
3316 }
3317
3318 break;
3319 case 13: {
3320
3321 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3322
3323
3324 /* content: static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)*/
3325 /* LDV_COMMENT_END_PREP */
3326 /* LDV_COMMENT_FUNCTION_CALL Function from field "check_ib_list" from driver structure with callbacks "mvumi_instance_9580" */
3327 ldv_handler_precall();
3328 mvumi_check_ib_list_9580( var_group1);
3329 /* LDV_COMMENT_BEGIN_PREP */
3330 #ifdef CONFIG_PM
3331 #endif
3332 /* LDV_COMMENT_END_PREP */
3333
3334
3335
3336
3337 }
3338
3339 break;
3340 case 14: {
3341
3342 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3343
3344
3345 /* content: static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba, unsigned int *cur_obf, unsigned int *assign_obf_end)*/
3346 /* LDV_COMMENT_END_PREP */
3347 /* LDV_COMMENT_FUNCTION_CALL Function from field "check_ob_list" from driver structure with callbacks "mvumi_instance_9580" */
3348 ldv_handler_precall();
3349 mvumi_check_ob_list_9580( var_group1, var_mvumi_check_ob_list_9580_22_p1, var_mvumi_check_ob_list_9580_22_p2);
3350 /* LDV_COMMENT_BEGIN_PREP */
3351 #ifdef CONFIG_PM
3352 #endif
3353 /* LDV_COMMENT_END_PREP */
3354
3355
3356
3357
3358 }
3359
3360 break;
3361 case 15: {
3362
3363 /** STRUCT: struct type: mvumi_instance_template, struct name: mvumi_instance_9580 **/
3364
3365
3366 /* content: static int mvumi_reset_host_9580(struct mvumi_hba *mhba)*/
3367 /* LDV_COMMENT_END_PREP */
3368 /* LDV_COMMENT_FUNCTION_CALL Function from field "reset_host" from driver structure with callbacks "mvumi_instance_9580" */
3369 ldv_handler_precall();
3370 mvumi_reset_host_9580( var_group1);
3371 /* LDV_COMMENT_BEGIN_PREP */
3372 #ifdef CONFIG_PM
3373 #endif
3374 /* LDV_COMMENT_END_PREP */
3375
3376
3377
3378
3379 }
3380
3381 break;
3382 case 16: {
3383
3384 /** STRUCT: struct type: scsi_host_template, struct name: mvumi_template **/
3385
3386
3387 /* content: static int mvumi_slave_configure(struct scsi_device *sdev)*/
3388 /* LDV_COMMENT_END_PREP */
3389 /* LDV_COMMENT_FUNCTION_CALL Function from field "slave_configure" from driver structure with callbacks "mvumi_template" */
3390 ldv_handler_precall();
3391 mvumi_slave_configure( var_group3);
3392 /* LDV_COMMENT_BEGIN_PREP */
3393 #ifdef CONFIG_PM
3394 #endif
3395 /* LDV_COMMENT_END_PREP */
3396
3397
3398
3399
3400 }
3401
3402 break;
3403 case 17: {
3404
3405 /** STRUCT: struct type: scsi_host_template, struct name: mvumi_template **/
3406
3407
3408 /* content: static int mvumi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)*/
3409 /* LDV_COMMENT_END_PREP */
3410 /* LDV_COMMENT_FUNCTION_CALL Function from field "queuecommand" from driver structure with callbacks "mvumi_template" */
3411 ldv_handler_precall();
3412 mvumi_queue_command( var_group4, var_group5);
3413 /* LDV_COMMENT_BEGIN_PREP */
3414 #ifdef CONFIG_PM
3415 #endif
3416 /* LDV_COMMENT_END_PREP */
3417
3418
3419
3420
3421 }
3422
3423 break;
3424 case 18: {
3425
3426 /** STRUCT: struct type: scsi_host_template, struct name: mvumi_template **/
3427
3428
3429 /* content: static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)*/
3430 /* LDV_COMMENT_END_PREP */
3431 /* LDV_COMMENT_FUNCTION_CALL Function from field "eh_timed_out" from driver structure with callbacks "mvumi_template" */
3432 ldv_handler_precall();
3433 mvumi_timed_out( var_group5);
3434 /* LDV_COMMENT_BEGIN_PREP */
3435 #ifdef CONFIG_PM
3436 #endif
3437 /* LDV_COMMENT_END_PREP */
3438
3439
3440
3441
3442 }
3443
3444 break;
3445 case 19: {
3446
3447 /** STRUCT: struct type: scsi_host_template, struct name: mvumi_template **/
3448
3449
3450 /* content: static int mvumi_host_reset(struct scsi_cmnd *scmd)*/
3451 /* LDV_COMMENT_END_PREP */
3452 /* LDV_COMMENT_FUNCTION_CALL Function from field "eh_host_reset_handler" from driver structure with callbacks "mvumi_template" */
3453 ldv_handler_precall();
3454 mvumi_host_reset( var_group5);
3455 /* LDV_COMMENT_BEGIN_PREP */
3456 #ifdef CONFIG_PM
3457 #endif
3458 /* LDV_COMMENT_END_PREP */
3459
3460
3461
3462
3463 }
3464
3465 break;
3466 case 20: {
3467
3468 /** STRUCT: struct type: pci_driver, struct name: mvumi_pci_driver **/
3469 if(ldv_s_mvumi_pci_driver_pci_driver==0) {
3470
3471 /* content: static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)*/
3472 /* LDV_COMMENT_END_PREP */
3473 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "mvumi_pci_driver". Standard test for a correct return result. */
3474 res_mvumi_probe_one_75 = mvumi_probe_one( var_group6, var_mvumi_probe_one_75_p1);
3475 ldv_check_return_value(res_mvumi_probe_one_75);
3476 ldv_check_return_value_probe(res_mvumi_probe_one_75);
3477 if(res_mvumi_probe_one_75)
3478 goto ldv_module_exit;
3479 /* LDV_COMMENT_BEGIN_PREP */
3480 #ifdef CONFIG_PM
3481 #endif
3482 /* LDV_COMMENT_END_PREP */
3483 ldv_s_mvumi_pci_driver_pci_driver++;
3484
3485 }
3486
3487 }
3488
3489 break;
3490 case 21: {
3491
3492 /** STRUCT: struct type: pci_driver, struct name: mvumi_pci_driver **/
3493 if(ldv_s_mvumi_pci_driver_pci_driver==1) {
3494
3495 /* content: static void mvumi_detach_one(struct pci_dev *pdev)*/
3496 /* LDV_COMMENT_END_PREP */
3497 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "mvumi_pci_driver" */
3498 ldv_handler_precall();
3499 mvumi_detach_one( var_group6);
3500 /* LDV_COMMENT_BEGIN_PREP */
3501 #ifdef CONFIG_PM
3502 #endif
3503 /* LDV_COMMENT_END_PREP */
3504 ldv_s_mvumi_pci_driver_pci_driver=0;
3505
3506 }
3507
3508 }
3509
3510 break;
3511 case 22: {
3512
3513 /** STRUCT: struct type: pci_driver, struct name: mvumi_pci_driver **/
3514
3515
3516 /* content: static void mvumi_shutdown(struct pci_dev *pdev)*/
3517 /* LDV_COMMENT_END_PREP */
3518 /* LDV_COMMENT_FUNCTION_CALL Function from field "shutdown" from driver structure with callbacks "mvumi_pci_driver" */
3519 ldv_handler_precall();
3520 mvumi_shutdown( var_group6);
3521 /* LDV_COMMENT_BEGIN_PREP */
3522 #ifdef CONFIG_PM
3523 #endif
3524 /* LDV_COMMENT_END_PREP */
3525
3526
3527
3528
3529 }
3530
3531 break;
3532 case 23: {
3533
3534 /** STRUCT: struct type: pci_driver, struct name: mvumi_pci_driver **/
3535
3536
3537 /* content: static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)*/
3538 /* LDV_COMMENT_END_PREP */
3539 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "mvumi_pci_driver" */
3540 ldv_handler_precall();
3541 mvumi_suspend( var_group6, var_mvumi_suspend_78_p1);
3542 /* LDV_COMMENT_BEGIN_PREP */
3543 #ifdef CONFIG_PM
3544 #endif
3545 /* LDV_COMMENT_END_PREP */
3546
3547
3548
3549
3550 }
3551
3552 break;
3553 case 24: {
3554
3555 /** STRUCT: struct type: pci_driver, struct name: mvumi_pci_driver **/
3556
3557
3558 /* content: static int __maybe_unused mvumi_resume(struct pci_dev *pdev)*/
3559 /* LDV_COMMENT_END_PREP */
3560 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "mvumi_pci_driver" */
3561 ldv_handler_precall();
3562 mvumi_resume( var_group6);
3563 /* LDV_COMMENT_BEGIN_PREP */
3564 #ifdef CONFIG_PM
3565 #endif
3566 /* LDV_COMMENT_END_PREP */
3567
3568
3569
3570
3571 }
3572
3573 break;
3574 case 25: {
3575
3576 /** CALLBACK SECTION request_irq **/
3577 LDV_IN_INTERRUPT=2;
3578
3579 /* content: static irqreturn_t mvumi_isr_handler(int irq, void *devp)*/
3580 /* LDV_COMMENT_END_PREP */
3581 /* LDV_COMMENT_FUNCTION_CALL */
3582 ldv_handler_precall();
3583 mvumi_isr_handler( var_mvumi_isr_handler_61_p0, var_mvumi_isr_handler_61_p1);
3584 /* LDV_COMMENT_BEGIN_PREP */
3585 #ifdef CONFIG_PM
3586 #endif
3587 /* LDV_COMMENT_END_PREP */
3588 LDV_IN_INTERRUPT=1;
3589
3590
3591
3592 }
3593
3594 break;
3595 default: break;
3596
3597 }
3598
3599 }
3600
3601 ldv_module_exit:
3602
3603 /** INIT: init_type: ST_MODULE_EXIT **/
3604 /* content: static void __exit mvumi_exit(void)*/
3605 /* LDV_COMMENT_BEGIN_PREP */
3606 #ifdef CONFIG_PM
3607 #endif
3608 /* LDV_COMMENT_END_PREP */
3609 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */
3610 ldv_handler_precall();
3611 mvumi_exit();
3612
3613 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
3614 ldv_final: ldv_check_final_state();
3615
3616 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
3617 return;
3618
3619 }
3620 #endif
3621
3622 /* LDV_COMMENT_END_MAIN */
3623
3624 #line 10 "/home/vitaly/ldv-launches/work/current--X--drivers--X--defaultlinux-4.11-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.11-rc1.tar.xz/csd_deg_dscv/2204/dscv_tempdir/dscv/ri/331_1a/drivers/scsi/mvumi.o.c.prepared" 1
2 #include <verifier/rcv.h>
3 #include <kernel-model/ERR.inc>
4
5 int LDV_DMA_MAP_CALLS = 0;
6
7 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_page') maps page */
8 void ldv_dma_map_page(void) {
9 /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
10 ldv_assert(LDV_DMA_MAP_CALLS == 0);
11 /* LDV_COMMENT_CHANGE_STATE Increase dma_mapping counter */
12 LDV_DMA_MAP_CALLS++;
13 }
14
15 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_mapping_error') checks the result of a dma mapping */
16 void ldv_dma_mapping_error(void) {
17 /* LDV_COMMENT_ASSERT No dma_mapping calls to verify */
18 ldv_assert(LDV_DMA_MAP_CALLS != 0);
19 /* LDV_COMMENT_CHANGE_STATE Mark the previous dma_mapping call as checked */
20 LDV_DMA_MAP_CALLS--;
21 }
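
For context, a minimal driver-side sketch of the pattern this model enforces: every streaming DMA mapping must be checked with dma_mapping_error() before its address is used, which is what balances the LDV_DMA_MAP_CALLS counter above. The names example_map_buffer and handle are hypothetical; dma_map_single() and dma_mapping_error() are the standard kernel helpers.

#include <linux/dma-mapping.h>

/* Hypothetical driver fragment; illustrates the rule modelled above. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* This check is what ldv_dma_mapping_error() models: it balances
	 * the LDV_DMA_MAP_CALLS counter incremented by the mapping call. */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*handle = addr;
	return 0;
}
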
22
23 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that the DMA-mapping counter is back to its initial value at the end */
24 void ldv_check_final_state(void) {
25 /* LDV_COMMENT_ASSERT Every dma_mapping call must have been checked before the module is unloaded */
26 ldv_assert(LDV_DMA_MAP_CALLS == 0);
27 }
1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because some static verifiers (like
9 BLAST) don't accept multiple error labels in a program. */
10 static inline void ldv_error(void)
11 {
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21 LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return nondeterministic negative integer number. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 int ret = ldv_undef_int();
33
34 ldv_assume(ret < 0);
35
36 return ret;
37 }
38 /* Return nondeterministic nonpositive integer number. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 int ret = ldv_undef_int();
42
43 ldv_assume(ret <= 0);
44
45 return ret;
46 }
47
48 /* Add explicit model for __builin_expect GCC function. Without the model a
49 return value will be treated as nondetermined by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62 ldv_assert(0);
63 }
64
65 /* The constant is used to simulate an error return from the ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */
1 #ifndef __LINUX_COMPILER_H
2 #define __LINUX_COMPILER_H
3
4 #ifndef __ASSEMBLY__
5
6 #ifdef __CHECKER__
7 # define __user __attribute__((noderef, address_space(1)))
8 # define __kernel __attribute__((address_space(0)))
9 # define __safe __attribute__((safe))
10 # define __force __attribute__((force))
11 # define __nocast __attribute__((nocast))
12 # define __iomem __attribute__((noderef, address_space(2)))
13 # define __must_hold(x) __attribute__((context(x,1,1)))
14 # define __acquires(x) __attribute__((context(x,0,1)))
15 # define __releases(x) __attribute__((context(x,1,0)))
16 # define __acquire(x) __context__(x,1)
17 # define __release(x) __context__(x,-1)
18 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
19 # define __percpu __attribute__((noderef, address_space(3)))
20 #ifdef CONFIG_SPARSE_RCU_POINTER
21 # define __rcu __attribute__((noderef, address_space(4)))
22 #else /* CONFIG_SPARSE_RCU_POINTER */
23 # define __rcu
24 #endif /* CONFIG_SPARSE_RCU_POINTER */
25 # define __private __attribute__((noderef))
26 extern void __chk_user_ptr(const volatile void __user *);
27 extern void __chk_io_ptr(const volatile void __iomem *);
28 # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
29 #else /* __CHECKER__ */
30 # ifdef STRUCTLEAK_PLUGIN
31 # define __user __attribute__((user))
32 # else
33 # define __user
34 # endif
35 # define __kernel
36 # define __safe
37 # define __force
38 # define __nocast
39 # define __iomem
40 # define __chk_user_ptr(x) (void)0
41 # define __chk_io_ptr(x) (void)0
42 # define __builtin_warning(x, y...) (1)
43 # define __must_hold(x)
44 # define __acquires(x)
45 # define __releases(x)
46 # define __acquire(x) (void)0
47 # define __release(x) (void)0
48 # define __cond_lock(x,c) (c)
49 # define __percpu
50 # define __rcu
51 # define __private
52 # define ACCESS_PRIVATE(p, member) ((p)->member)
53 #endif /* __CHECKER__ */
54
55 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
56 #define ___PASTE(a,b) a##b
57 #define __PASTE(a,b) ___PASTE(a,b)
58
59 #ifdef __KERNEL__
60
61 #ifdef __GNUC__
62 #include <linux/compiler-gcc.h>
63 #endif
64
65 #if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
66 #define notrace __attribute__((hotpatch(0,0)))
67 #else
68 #define notrace __attribute__((no_instrument_function))
69 #endif
70
71 /* Intel compiler defines __GNUC__. So we will overwrite implementations
72 * coming from above header files here
73 */
74 #ifdef __INTEL_COMPILER
75 # include <linux/compiler-intel.h>
76 #endif
77
78 /* Clang compiler defines __GNUC__. So we will overwrite implementations
79 * coming from above header files here
80 */
81 #ifdef __clang__
82 #include <linux/compiler-clang.h>
83 #endif
84
85 /*
86 * Generic compiler-dependent macros required for kernel
87 * build go below this comment. Actual compiler/compiler version
88 * specific implementations come from the above header files
89 */
90
91 struct ftrace_branch_data {
92 const char *func;
93 const char *file;
94 unsigned line;
95 union {
96 struct {
97 unsigned long correct;
98 unsigned long incorrect;
99 };
100 struct {
101 unsigned long miss;
102 unsigned long hit;
103 };
104 unsigned long miss_hit[2];
105 };
106 };
107
108 struct ftrace_likely_data {
109 struct ftrace_branch_data data;
110 unsigned long constant;
111 };
112
113 /*
114 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
115 * to disable branch tracing on a per file basis.
116 */
117 #if defined(CONFIG_TRACE_BRANCH_PROFILING) \
118 && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
119 void ftrace_likely_update(struct ftrace_likely_data *f, int val,
120 int expect, int is_constant);
121
122 #define likely_notrace(x) __builtin_expect(!!(x), 1)
123 #define unlikely_notrace(x) __builtin_expect(!!(x), 0)
124
125 #define __branch_check__(x, expect, is_constant) ({ \
126 int ______r; \
127 static struct ftrace_likely_data \
128 __attribute__((__aligned__(4))) \
129 __attribute__((section("_ftrace_annotated_branch"))) \
130 ______f = { \
131 .data.func = __func__, \
132 .data.file = __FILE__, \
133 .data.line = __LINE__, \
134 }; \
135 ______r = __builtin_expect(!!(x), expect); \
136 ftrace_likely_update(&______f, ______r, \
137 expect, is_constant); \
138 ______r; \
139 })
140
141 /*
142 * Using __builtin_constant_p(x) to ignore cases where the return
143 * value is always the same. This idea is taken from a similar patch
144 * written by Daniel Walker.
145 */
146 # ifndef likely
147 # define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x)))
148 # endif
149 # ifndef unlikely
150 # define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x)))
151 # endif
152
153 #ifdef CONFIG_PROFILE_ALL_BRANCHES
154 /*
155 * "Define 'is'", Bill Clinton
156 * "Define 'if'", Steven Rostedt
157 */
158 #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
159 #define __trace_if(cond) \
160 if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
161 ({ \
162 int ______r; \
163 static struct ftrace_branch_data \
164 __attribute__((__aligned__(4))) \
165 __attribute__((section("_ftrace_branch"))) \
166 ______f = { \
167 .func = __func__, \
168 .file = __FILE__, \
169 .line = __LINE__, \
170 }; \
171 ______r = !!(cond); \
172 ______f.miss_hit[______r]++; \
173 ______r; \
174 }))
175 #endif /* CONFIG_PROFILE_ALL_BRANCHES */
176
177 #else
178 # define likely(x) __builtin_expect(!!(x), 1)
179 # define unlikely(x) __builtin_expect(!!(x), 0)
180 #endif
181
182 /* Optimization barrier */
183 #ifndef barrier
184 # define barrier() __memory_barrier()
185 #endif
186
187 #ifndef barrier_data
188 # define barrier_data(ptr) barrier()
189 #endif
190
191 /* Unreachable code */
192 #ifndef unreachable
193 # define unreachable() do { } while (1)
194 #endif
195
196 /*
197 * KENTRY - kernel entry point
198 * This can be used to annotate symbols (functions or data) that are used
199 * without their linker symbol being referenced explicitly. For example,
200 * interrupt vector handlers, or functions in the kernel image that are found
201 * programmatically.
202 *
203 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
204 * are handled in their own way (with KEEP() in linker scripts).
205 *
206 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
207 * linker script. For example an architecture could KEEP() its entire
208 * boot/exception vector code rather than annotate each function and data.
209 */
210 #ifndef KENTRY
211 # define KENTRY(sym) \
212 extern typeof(sym) sym; \
213 static const unsigned long __kentry_##sym \
214 __used \
215 __attribute__((section("___kentry" "+" #sym ), used)) \
216 = (unsigned long)&sym;
217 #endif
218
219 #ifndef RELOC_HIDE
220 # define RELOC_HIDE(ptr, off) \
221 ({ unsigned long __ptr; \
222 __ptr = (unsigned long) (ptr); \
223 (typeof(ptr)) (__ptr + (off)); })
224 #endif
225
226 #ifndef OPTIMIZER_HIDE_VAR
227 #define OPTIMIZER_HIDE_VAR(var) barrier()
228 #endif
229
230 /* Not-quite-unique ID. */
231 #ifndef __UNIQUE_ID
232 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
233 #endif
234
235 #include <uapi/linux/types.h>
236
237 #define __READ_ONCE_SIZE \
238 ({ \
239 switch (size) { \
240 case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
241 case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
242 case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
243 case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
244 default: \
245 barrier(); \
246 __builtin_memcpy((void *)res, (const void *)p, size); \
247 barrier(); \
248 } \
249 })
250
251 static __always_inline
252 void __read_once_size(const volatile void *p, void *res, int size)
253 {
254 __READ_ONCE_SIZE;
255 }
256
257 #ifdef CONFIG_KASAN
258 /*
259 * This function is not 'inline' because __no_sanitize_address conflicts
260 * with inlining. Attempt to inline it may cause a build failure.
261 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
262 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
263 */
264 static __no_sanitize_address __maybe_unused
265 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
266 {
267 __READ_ONCE_SIZE;
268 }
269 #else
270 static __always_inline
271 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
272 {
273 __READ_ONCE_SIZE;
274 }
275 #endif
276
277 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
278 {
279 switch (size) {
280 case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
281 case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
282 case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
283 case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
284 default:
285 barrier();
286 __builtin_memcpy((void *)p, (const void *)res, size);
287 barrier();
288 }
289 }
290
291 /*
292 * Prevent the compiler from merging or refetching reads or writes. The
293 * compiler is also forbidden from reordering successive instances of
294 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
295 * compiler is aware of some particular ordering. One way to make the
296 * compiler aware of ordering is to put the two invocations of READ_ONCE,
297 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
298 *
299 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
300 * data types like structs or unions. If the size of the accessed data
301 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
302 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
303 * least two memcpy()s: one for the __builtin_memcpy() and then one for
304 * the macro doing the copy of variable - '__u' allocated on the stack.
305 *
306 * Their two major use cases are: (1) Mediating communication between
307 * process-level code and irq/NMI handlers, all running on the same CPU,
308 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
309 * mutilate accesses that either do not require ordering or that interact
310 * with an explicit memory barrier or atomic instruction that provides the
311 * required ordering.
312 */
313
314 #define __READ_ONCE(x, check) \
315 ({ \
316 union { typeof(x) __val; char __c[1]; } __u; \
317 if (check) \
318 __read_once_size(&(x), __u.__c, sizeof(x)); \
319 else \
320 __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
321 __u.__val; \
322 })
323 #define READ_ONCE(x) __READ_ONCE(x, 1)
324
325 /*
326 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
327 * to hide memory access from KASAN.
328 */
329 #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
330
331 #define WRITE_ONCE(x, val) \
332 ({ \
333 union { typeof(x) __val; char __c[1]; } __u = \
334 { .__val = (__force typeof(x)) (val) }; \
335 __write_once_size(&(x), __u.__c, sizeof(x)); \
336 __u.__val; \
337 })
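
As an illustration of the comment above, a minimal sketch of the intended READ_ONCE()/WRITE_ONCE() use case: process-level code communicating with an interrupt handler through a shared flag. The names example_done, example_irq_side and example_task_side are hypothetical, cpu_relax() is assumed to come from the usual <asm/processor.h>, and no CPU memory barriers are implied beyond what the macros provide.

static int example_done;

/* Hypothetical IRQ-side writer: a single, untorn store. */
static void example_irq_side(void)
{
	WRITE_ONCE(example_done, 1);
}

/* Hypothetical task-side reader: the load is re-issued on every pass
 * instead of being hoisted out of the loop by the compiler. */
static void example_task_side(void)
{
	while (!READ_ONCE(example_done))
		cpu_relax();
}
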
338
339 #endif /* __KERNEL__ */
340
341 #endif /* __ASSEMBLY__ */
342
343 #ifdef __KERNEL__
344 /*
345 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
346 * warning for each use, in hopes of speeding the functions removal.
347 * Usage is:
348 * int __deprecated foo(void)
349 */
350 #ifndef __deprecated
351 # define __deprecated /* unimplemented */
352 #endif
353
354 #ifdef MODULE
355 #define __deprecated_for_modules __deprecated
356 #else
357 #define __deprecated_for_modules
358 #endif
359
360 #ifndef __must_check
361 #define __must_check
362 #endif
363
364 #ifndef CONFIG_ENABLE_MUST_CHECK
365 #undef __must_check
366 #define __must_check
367 #endif
368 #ifndef CONFIG_ENABLE_WARN_DEPRECATED
369 #undef __deprecated
370 #undef __deprecated_for_modules
371 #define __deprecated
372 #define __deprecated_for_modules
373 #endif
374
375 #ifndef __malloc
376 #define __malloc
377 #endif
378
379 /*
380 * Allow us to avoid 'defined but not used' warnings on functions and data,
381 * as well as force them to be emitted to the assembly file.
382 *
383 * As of gcc 3.4, static functions that are not marked with attribute((used))
384 * may be elided from the assembly file. As of gcc 3.4, static data not so
385 * marked will not be elided, but this may change in a future gcc version.
386 *
387 * NOTE: Because distributions shipped with a backported unit-at-a-time
388 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
389 * for gcc >=3.3 instead of 3.4.
390 *
391 * In prior versions of gcc, such functions and data would be emitted, but
392 * would be warned about except with attribute((unused)).
393 *
394 * Mark functions that are referenced only in inline assembly as __used so
395 * the code is emitted even though it appears to be unreferenced.
396 */
397 #ifndef __used
398 # define __used /* unimplemented */
399 #endif
400
401 #ifndef __maybe_unused
402 # define __maybe_unused /* unimplemented */
403 #endif
404
405 #ifndef __always_unused
406 # define __always_unused /* unimplemented */
407 #endif
408
409 #ifndef noinline
410 #define noinline
411 #endif
412
413 /*
414 * Rather than using noinline to prevent stack consumption, use
415 * noinline_for_stack instead. For documentation reasons.
416 */
417 #define noinline_for_stack noinline
418
419 #ifndef __always_inline
420 #define __always_inline inline
421 #endif
422
423 #endif /* __KERNEL__ */
424
425 /*
426 * From the GCC manual:
427 *
428 * Many functions do not examine any values except their arguments,
429 * and have no effects except the return value. Basically this is
430 * just slightly more strict class than the `pure' attribute above,
431 * since function is not allowed to read global memory.
432 *
433 * Note that a function that has pointer arguments and examines the
434 * data pointed to must _not_ be declared `const'. Likewise, a
435 * function that calls a non-`const' function usually must not be
436 * `const'. It does not make sense for a `const' function to return
437 * `void'.
438 */
439 #ifndef __attribute_const__
440 # define __attribute_const__ /* unimplemented */
441 #endif
442
443 #ifndef __latent_entropy
444 # define __latent_entropy
445 #endif
446
447 /*
448 * Tell gcc if a function is cold. The compiler will assume any path
449 * directly leading to the call is unlikely.
450 */
451
452 #ifndef __cold
453 #define __cold
454 #endif
455
456 /* Simple shorthand for a section definition */
457 #ifndef __section
458 # define __section(S) __attribute__ ((__section__(#S)))
459 #endif
460
461 #ifndef __visible
462 #define __visible
463 #endif
464
465 /*
466 * Assume alignment of return value.
467 */
468 #ifndef __assume_aligned
469 #define __assume_aligned(a, ...)
470 #endif
471
472
473 /* Are two types/vars the same type (ignoring qualifiers)? */
474 #ifndef __same_type
475 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
476 #endif
477
478 /* Is this type a native word size -- useful for atomic operations */
479 #ifndef __native_word
480 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
481 #endif
482
483 /* Compile time object size, -1 for unknown */
484 #ifndef __compiletime_object_size
485 # define __compiletime_object_size(obj) -1
486 #endif
487 #ifndef __compiletime_warning
488 # define __compiletime_warning(message)
489 #endif
490 #ifndef __compiletime_error
491 # define __compiletime_error(message)
492 /*
493 * Sparse complains of variable sized arrays due to the temporary variable in
494 * __compiletime_assert. Unfortunately we can't just expand it out to make
495 * sparse see a constant array size without breaking compiletime_assert on old
496 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
497 */
498 # ifndef __CHECKER__
499 # define __compiletime_error_fallback(condition) \
500 do { } while (0)
501 # endif
502 #endif
503 #ifndef __compiletime_error_fallback
504 # define __compiletime_error_fallback(condition) do { } while (0)
505 #endif
506
507 #define __compiletime_assert(condition, msg, prefix, suffix) \
508 do { \
509 bool __cond = !(condition); \
510 extern void prefix ## suffix(void) __compiletime_error(msg); \
511 if (__cond) \
512 prefix ## suffix(); \
513 __compiletime_error_fallback(__cond); \
514 } while (0)
515
516 #define _compiletime_assert(condition, msg, prefix, suffix) \
517 __compiletime_assert(condition, msg, prefix, suffix)
518
519 /**
520 * compiletime_assert - break build and emit msg if condition is false
521 * @condition: a compile-time constant condition to check
522 * @msg: a message to emit if condition is false
523 *
524 * In tradition of POSIX assert, this macro will break the build if the
525 * supplied condition is *false*, emitting the supplied error message if the
526 * compiler has support to do so.
527 */
528 #define compiletime_assert(condition, msg) \
529 _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
530
531 #define compiletime_assert_atomic_type(t) \
532 compiletime_assert(__native_word(t), \
533 "Need native word sized stores/loads for atomicity.")
534
535 /*
536 * Prevent the compiler from merging or refetching accesses. The compiler
537 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
538 * but only when the compiler is aware of some particular ordering. One way
539 * to make the compiler aware of ordering is to put the two invocations of
540 * ACCESS_ONCE() in different C statements.
541 *
542 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
543 * on a union member will work as long as the size of the member matches the
544 * size of the union and the size is smaller than word size.
545 *
546 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
547 * between process-level code and irq/NMI handlers, all running on the same CPU,
548 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
549 * mutilate accesses that either do not require ordering or that interact
550 * with an explicit memory barrier or atomic instruction that provides the
551 * required ordering.
552 *
553 * If possible use READ_ONCE()/WRITE_ONCE() instead.
554 */
555 #define __ACCESS_ONCE(x) ({ \
556 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
557 (volatile typeof(x) *)&(x); })
558 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
559
560 /**
561 * lockless_dereference() - safely load a pointer for later dereference
562 * @p: The pointer to load
563 *
564 * Similar to rcu_dereference(), but for situations where the pointed-to
565 * object's lifetime is managed by something other than RCU. That
566 * "something other" might be reference counting or simple immortality.
567 *
568 * The seemingly unused variable ___typecheck_p validates that @p is
569 * indeed a pointer type by using a pointer to typeof(*p) as the type.
570 * Taking a pointer to typeof(*p) again is needed in case p is void *.
571 */
572 #define lockless_dereference(p) \
573 ({ \
574 typeof(p) _________p1 = READ_ONCE(p); \
575 typeof(*(p)) *___typecheck_p __maybe_unused; \
576 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
577 (_________p1); \
578 })
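
A small sketch of the reader side this macro is meant for, assuming the writer publishes the pointer with smp_store_release() or rcu_assign_pointer() and that the usual barrier headers are included; struct example_cfg and example_read_value() are hypothetical names.

struct example_cfg { int value; };
static struct example_cfg *example_cfg_ptr;

/* Hypothetical reader: the dependency barrier inside
 * lockless_dereference() ensures that if the pointer is seen, the
 * fields written before it was published are seen too. */
static int example_read_value(void)
{
	struct example_cfg *cfg = lockless_dereference(example_cfg_ptr);

	return cfg ? cfg->value : 0;
}
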
579
580 #endif /* __LINUX_COMPILER_H */
1 #ifndef _LINUX_DMA_MAPPING_H
2 #define _LINUX_DMA_MAPPING_H
3
4 #include <linux/sizes.h>
5 #include <linux/string.h>
6 #include <linux/device.h>
7 #include <linux/err.h>
8 #include <linux/dma-debug.h>
9 #include <linux/dma-direction.h>
10 #include <linux/scatterlist.h>
11 #include <linux/kmemcheck.h>
12 #include <linux/bug.h>
13
14 /**
15 * List of possible attributes associated with a DMA mapping. The semantics
16 * of each attribute should be defined in Documentation/DMA-attributes.txt.
17 *
18 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
19 * forces all pending DMA writes to complete.
20 */
21 #define DMA_ATTR_WRITE_BARRIER (1UL << 0)
22 /*
23 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
24 * may be weakly ordered, that is that reads and writes may pass each other.
25 */
26 #define DMA_ATTR_WEAK_ORDERING (1UL << 1)
27 /*
28 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
29 * buffered to improve performance.
30 */
31 #define DMA_ATTR_WRITE_COMBINE (1UL << 2)
32 /*
33 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
34 * consistent or non-consistent memory as it sees fit.
35 */
36 #define DMA_ATTR_NON_CONSISTENT (1UL << 3)
37 /*
38 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
39 * virtual mapping for the allocated buffer.
40 */
41 #define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
42 /*
43 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
44 * the CPU cache for the given buffer assuming that it has been already
45 * transferred to 'device' domain.
46 */
47 #define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
48 /*
49 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
50 * in physical memory.
51 */
52 #define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
53 /*
54 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
55 * that it's probably not worth the time to try to allocate memory in a way
56 * that gives better TLB efficiency.
57 */
58 #define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
59 /*
60 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
61 * allocation failure reports (similarly to __GFP_NOWARN).
62 */
63 #define DMA_ATTR_NO_WARN (1UL << 8)
64
65 /*
66 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
67 * accessible at an elevated privilege level (and ideally inaccessible or
68 * at least read-only at lesser-privileged levels).
69 */
70 #define DMA_ATTR_PRIVILEGED (1UL << 9)
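
A brief sketch of how these attribute bits are combined and passed to the *_attrs mapping helpers defined later in this header; example_map_relaxed() is a hypothetical name.

/* Hypothetical mapping that tolerates relaxed ordering and skips the
 * CPU cache sync by OR-ing two of the attribute bits listed above. */
static dma_addr_t example_map_relaxed(struct device *dev, struct page *pg,
				      size_t len)
{
	return dma_map_page_attrs(dev, pg, 0, len, DMA_TO_DEVICE,
				  DMA_ATTR_WEAK_ORDERING |
				  DMA_ATTR_SKIP_CPU_SYNC);
}
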
71
72 /*
73 * A dma_addr_t can hold any valid DMA or bus address for the platform.
74 * It can be given to a device to use as a DMA source or target. A CPU cannot
75 * reference a dma_addr_t directly because there may be translation between
76 * its physical address space and the bus address space.
77 */
78 struct dma_map_ops {
79 void* (*alloc)(struct device *dev, size_t size,
80 dma_addr_t *dma_handle, gfp_t gfp,
81 unsigned long attrs);
82 void (*free)(struct device *dev, size_t size,
83 void *vaddr, dma_addr_t dma_handle,
84 unsigned long attrs);
85 int (*mmap)(struct device *, struct vm_area_struct *,
86 void *, dma_addr_t, size_t,
87 unsigned long attrs);
88
89 int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
90 dma_addr_t, size_t, unsigned long attrs);
91
92 dma_addr_t (*map_page)(struct device *dev, struct page *page,
93 unsigned long offset, size_t size,
94 enum dma_data_direction dir,
95 unsigned long attrs);
96 void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
97 size_t size, enum dma_data_direction dir,
98 unsigned long attrs);
99 /*
100 * map_sg returns 0 on error and a value > 0 on success.
101 * It should never return a value < 0.
102 */
103 int (*map_sg)(struct device *dev, struct scatterlist *sg,
104 int nents, enum dma_data_direction dir,
105 unsigned long attrs);
106 void (*unmap_sg)(struct device *dev,
107 struct scatterlist *sg, int nents,
108 enum dma_data_direction dir,
109 unsigned long attrs);
110 dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
111 size_t size, enum dma_data_direction dir,
112 unsigned long attrs);
113 void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
114 size_t size, enum dma_data_direction dir,
115 unsigned long attrs);
116 void (*sync_single_for_cpu)(struct device *dev,
117 dma_addr_t dma_handle, size_t size,
118 enum dma_data_direction dir);
119 void (*sync_single_for_device)(struct device *dev,
120 dma_addr_t dma_handle, size_t size,
121 enum dma_data_direction dir);
122 void (*sync_sg_for_cpu)(struct device *dev,
123 struct scatterlist *sg, int nents,
124 enum dma_data_direction dir);
125 void (*sync_sg_for_device)(struct device *dev,
126 struct scatterlist *sg, int nents,
127 enum dma_data_direction dir);
128 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
129 int (*dma_supported)(struct device *dev, u64 mask);
130 int (*set_dma_mask)(struct device *dev, u64 mask);
131 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
132 u64 (*get_required_mask)(struct device *dev);
133 #endif
134 int is_phys;
135 };
136
137 extern const struct dma_map_ops dma_noop_ops;
138 extern const struct dma_map_ops dma_virt_ops;
139
140 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
141
142 #define DMA_MASK_NONE 0x0ULL
143
144 static inline int valid_dma_direction(int dma_direction)
145 {
146 return ((dma_direction == DMA_BIDIRECTIONAL) ||
147 (dma_direction == DMA_TO_DEVICE) ||
148 (dma_direction == DMA_FROM_DEVICE));
149 }
150
151 static inline int is_device_dma_capable(struct device *dev)
152 {
153 return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
154 }
155
156 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
157 /*
158 * These three functions are only for dma allocator.
159 * Don't use them in device drivers.
160 */
161 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
162 dma_addr_t *dma_handle, void **ret);
163 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
164
165 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
166 void *cpu_addr, size_t size, int *ret);
167 #else
168 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
169 #define dma_release_from_coherent(dev, order, vaddr) (0)
170 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
171 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
172
173 #ifdef CONFIG_HAS_DMA
174 #include <asm/dma-mapping.h>
175 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
176 {
177 if (dev && dev->dma_ops)
178 return dev->dma_ops;
179 return get_arch_dma_ops(dev ? dev->bus : NULL);
180 }
181
182 static inline void set_dma_ops(struct device *dev,
183 const struct dma_map_ops *dma_ops)
184 {
185 dev->dma_ops = dma_ops;
186 }
187 #else
188 /*
189 * Define the dma api to allow compilation but not linking of
190 * dma dependent code. Code that depends on the dma-mapping
191 * API needs to set 'depends on HAS_DMA' in its Kconfig
192 */
193 extern const struct dma_map_ops bad_dma_ops;
194 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
195 {
196 return &bad_dma_ops;
197 }
198 #endif
199
200 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
201 size_t size,
202 enum dma_data_direction dir,
203 unsigned long attrs)
204 {
205 const struct dma_map_ops *ops = get_dma_ops(dev);
206 dma_addr_t addr;
207
208 kmemcheck_mark_initialized(ptr, size);
209 BUG_ON(!valid_dma_direction(dir));
210 addr = ops->map_page(dev, virt_to_page(ptr),
211 offset_in_page(ptr), size,
212 dir, attrs);
213 debug_dma_map_page(dev, virt_to_page(ptr),
214 offset_in_page(ptr), size,
215 dir, addr, true);
216 return addr;
217 }
218
219 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
220 size_t size,
221 enum dma_data_direction dir,
222 unsigned long attrs)
223 {
224 const struct dma_map_ops *ops = get_dma_ops(dev);
225
226 BUG_ON(!valid_dma_direction(dir));
227 if (ops->unmap_page)
228 ops->unmap_page(dev, addr, size, dir, attrs);
229 debug_dma_unmap_page(dev, addr, size, dir, true);
230 }
231
232 /*
233 * dma_map_sg_attrs returns 0 on error and > 0 on success.
234 * It should never return a value < 0.
235 */
236 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
237 int nents, enum dma_data_direction dir,
238 unsigned long attrs)
239 {
240 const struct dma_map_ops *ops = get_dma_ops(dev);
241 int i, ents;
242 struct scatterlist *s;
243
244 for_each_sg(sg, s, nents, i)
245 kmemcheck_mark_initialized(sg_virt(s), s->length);
246 BUG_ON(!valid_dma_direction(dir));
247 ents = ops->map_sg(dev, sg, nents, dir, attrs);
248 BUG_ON(ents < 0);
249 debug_dma_map_sg(dev, sg, nents, ents, dir);
250
251 return ents;
252 }
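/*
 * A minimal usage sketch (not part of the dumped header) of the 0-on-error
 * convention documented above; the scatterlist, nents and device are
 * hypothetical, and DMA attribute handling is omitted (attrs = 0).
 */
static int example_map_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
	int mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);

	if (mapped == 0)	/* 0 means failure; a negative value is never returned */
		return -ENOMEM;

	/* ... program the device with the "mapped" coalesced segments ... */

	dma_unmap_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);	/* original nents, not "mapped" */
	return 0;
}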
253
254 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
255 int nents, enum dma_data_direction dir,
256 unsigned long attrs)
257 {
258 const struct dma_map_ops *ops = get_dma_ops(dev);
259
260 BUG_ON(!valid_dma_direction(dir));
261 debug_dma_unmap_sg(dev, sg, nents, dir);
262 if (ops->unmap_sg)
263 ops->unmap_sg(dev, sg, nents, dir, attrs);
264 }
265
266 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
267 struct page *page,
268 size_t offset, size_t size,
269 enum dma_data_direction dir,
270 unsigned long attrs)
271 {
272 const struct dma_map_ops *ops = get_dma_ops(dev);
273 dma_addr_t addr;
274
275 kmemcheck_mark_initialized(page_address(page) + offset, size);
276 BUG_ON(!valid_dma_direction(dir));
277 addr = ops->map_page(dev, page, offset, size, dir, attrs);
278 debug_dma_map_page(dev, page, offset, size, dir, addr, false);
279
280 return addr;
281 }
282
283 static inline void dma_unmap_page_attrs(struct device *dev,
284 dma_addr_t addr, size_t size,
285 enum dma_data_direction dir,
286 unsigned long attrs)
287 {
288 const struct dma_map_ops *ops = get_dma_ops(dev);
289
290 BUG_ON(!valid_dma_direction(dir));
291 if (ops->unmap_page)
292 ops->unmap_page(dev, addr, size, dir, attrs);
293 debug_dma_unmap_page(dev, addr, size, dir, false);
294 }
295
296 static inline dma_addr_t dma_map_resource(struct device *dev,
297 phys_addr_t phys_addr,
298 size_t size,
299 enum dma_data_direction dir,
300 unsigned long attrs)
301 {
302 const struct dma_map_ops *ops = get_dma_ops(dev);
303 dma_addr_t addr;
304
305 BUG_ON(!valid_dma_direction(dir));
306
307 /* Don't allow RAM to be mapped */
308 BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
309
310 addr = phys_addr;
311 if (ops->map_resource)
312 addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
313
314 debug_dma_map_resource(dev, phys_addr, size, dir, addr);
315
316 return addr;
317 }
318
319 static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
320 size_t size, enum dma_data_direction dir,
321 unsigned long attrs)
322 {
323 const struct dma_map_ops *ops = get_dma_ops(dev);
324
325 BUG_ON(!valid_dma_direction(dir));
326 if (ops->unmap_resource)
327 ops->unmap_resource(dev, addr, size, dir, attrs);
328 debug_dma_unmap_resource(dev, addr, size, dir);
329 }
330
331 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
332 size_t size,
333 enum dma_data_direction dir)
334 {
335 const struct dma_map_ops *ops = get_dma_ops(dev);
336
337 BUG_ON(!valid_dma_direction(dir));
338 if (ops->sync_single_for_cpu)
339 ops->sync_single_for_cpu(dev, addr, size, dir);
340 debug_dma_sync_single_for_cpu(dev, addr, size, dir);
341 }
342
343 static inline void dma_sync_single_for_device(struct device *dev,
344 dma_addr_t addr, size_t size,
345 enum dma_data_direction dir)
346 {
347 const struct dma_map_ops *ops = get_dma_ops(dev);
348
349 BUG_ON(!valid_dma_direction(dir));
350 if (ops->sync_single_for_device)
351 ops->sync_single_for_device(dev, addr, size, dir);
352 debug_dma_sync_single_for_device(dev, addr, size, dir);
353 }
354
355 static inline void dma_sync_single_range_for_cpu(struct device *dev,
356 dma_addr_t addr,
357 unsigned long offset,
358 size_t size,
359 enum dma_data_direction dir)
360 {
361 const struct dma_map_ops *ops = get_dma_ops(dev);
362
363 BUG_ON(!valid_dma_direction(dir));
364 if (ops->sync_single_for_cpu)
365 ops->sync_single_for_cpu(dev, addr + offset, size, dir);
366 debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
367 }
368
369 static inline void dma_sync_single_range_for_device(struct device *dev,
370 dma_addr_t addr,
371 unsigned long offset,
372 size_t size,
373 enum dma_data_direction dir)
374 {
375 const struct dma_map_ops *ops = get_dma_ops(dev);
376
377 BUG_ON(!valid_dma_direction(dir));
378 if (ops->sync_single_for_device)
379 ops->sync_single_for_device(dev, addr + offset, size, dir);
380 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
381 }
382
383 static inline void
384 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
385 int nelems, enum dma_data_direction dir)
386 {
387 const struct dma_map_ops *ops = get_dma_ops(dev);
388
389 BUG_ON(!valid_dma_direction(dir));
390 if (ops->sync_sg_for_cpu)
391 ops->sync_sg_for_cpu(dev, sg, nelems, dir);
392 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
393 }
394
395 static inline void
396 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
397 int nelems, enum dma_data_direction dir)
398 {
399 const struct dma_map_ops *ops = get_dma_ops(dev);
400
401 BUG_ON(!valid_dma_direction(dir));
402 if (ops->sync_sg_for_device)
403 ops->sync_sg_for_device(dev, sg, nelems, dir);
404 debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
405
406 }
407
408 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
409 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
410 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
411 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
412 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
413 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
414
415 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
416 void *cpu_addr, dma_addr_t dma_addr, size_t size);
417
418 void *dma_common_contiguous_remap(struct page *page, size_t size,
419 unsigned long vm_flags,
420 pgprot_t prot, const void *caller);
421
422 void *dma_common_pages_remap(struct page **pages, size_t size,
423 unsigned long vm_flags, pgprot_t prot,
424 const void *caller);
425 void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
426
427 /**
428 * dma_mmap_attrs - map a coherent DMA allocation into user space
429 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
430 * @vma: vm_area_struct describing requested user mapping
431 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
432 * @handle: device-view address returned from dma_alloc_attrs
433 * @size: size of memory originally requested in dma_alloc_attrs
434 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
435 *
436 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
437 * into user space. The coherent DMA buffer must not be freed by the
438 * driver until the user space mapping has been released.
439 */
440 static inline int
441 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
442 dma_addr_t dma_addr, size_t size, unsigned long attrs)
443 {
444 const struct dma_map_ops *ops = get_dma_ops(dev);
445 BUG_ON(!ops);
446 if (ops->mmap)
447 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
448 return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
449 }
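/*
 * A hedged sketch of exposing a coherent buffer to user space with
 * dma_mmap_attrs(); "struct example_priv" and the file plumbing are
 * hypothetical, and the fields come from an earlier dma_alloc_attrs()/
 * dma_alloc_coherent() call.
 */
struct example_priv {
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_handle;
	size_t size;
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_priv *p = file->private_data;

	return dma_mmap_attrs(p->dev, vma, p->cpu_addr, p->dma_handle,
			      p->size, 0);
}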
450
451 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
452
453 int
454 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
455 void *cpu_addr, dma_addr_t dma_addr, size_t size);
456
457 static inline int
458 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
459 dma_addr_t dma_addr, size_t size,
460 unsigned long attrs)
461 {
462 const struct dma_map_ops *ops = get_dma_ops(dev);
463 BUG_ON(!ops);
464 if (ops->get_sgtable)
465 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
466 attrs);
467 return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
468 }
469
470 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
471
472 #ifndef arch_dma_alloc_attrs
473 #define arch_dma_alloc_attrs(dev, flag) (true)
474 #endif
475
476 static inline void *dma_alloc_attrs(struct device *dev, size_t size,
477 dma_addr_t *dma_handle, gfp_t flag,
478 unsigned long attrs)
479 {
480 const struct dma_map_ops *ops = get_dma_ops(dev);
481 void *cpu_addr;
482
483 BUG_ON(!ops);
484
485 if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
486 return cpu_addr;
487
488 if (!arch_dma_alloc_attrs(&dev, &flag))
489 return NULL;
490 if (!ops->alloc)
491 return NULL;
492
493 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
494 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
495 return cpu_addr;
496 }
497
498 static inline void dma_free_attrs(struct device *dev, size_t size,
499 void *cpu_addr, dma_addr_t dma_handle,
500 unsigned long attrs)
501 {
502 const struct dma_map_ops *ops = get_dma_ops(dev);
503
504 BUG_ON(!ops);
505 WARN_ON(irqs_disabled());
506
507 if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
508 return;
509
510 if (!ops->free || !cpu_addr)
511 return;
512
513 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
514 ops->free(dev, size, cpu_addr, dma_handle, attrs);
515 }
516
517 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
518 dma_addr_t *dma_handle, gfp_t flag)
519 {
520 return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
521 }
522
523 static inline void dma_free_coherent(struct device *dev, size_t size,
524 void *cpu_addr, dma_addr_t dma_handle)
525 {
526 return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
527 }
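/*
 * A minimal sketch (not from the kernel source) of the coherent alloc/free
 * pairing above; the device pointer, size and surrounding driver state are
 * hypothetical.
 */
static int example_coherent_buffer(struct device *dev, size_t size)
{
	dma_addr_t handle;
	void *cpu = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);

	if (!cpu)
		return -ENOMEM;

	/* ... hand "handle" to the hardware, access the buffer via "cpu" ... */

	dma_free_coherent(dev, size, cpu, handle);
	return 0;
}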
528
529 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
530 dma_addr_t *dma_handle, gfp_t gfp)
531 {
532 return dma_alloc_attrs(dev, size, dma_handle, gfp,
533 DMA_ATTR_NON_CONSISTENT);
534 }
535
536 static inline void dma_free_noncoherent(struct device *dev, size_t size,
537 void *cpu_addr, dma_addr_t dma_handle)
538 {
539 dma_free_attrs(dev, size, cpu_addr, dma_handle,
540 DMA_ATTR_NON_CONSISTENT);
541 }
542
543 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
544 {
545 debug_dma_mapping_error(dev, dma_addr);
546
547 if (get_dma_ops(dev)->mapping_error)
548 return get_dma_ops(dev)->mapping_error(dev, dma_addr);
549
550 #ifdef DMA_ERROR_CODE
551 return dma_addr == DMA_ERROR_CODE;
552 #else
553 return 0;
554 #endif
555 }
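/*
 * A sketch of the streaming-mapping error check: the returned handle must be
 * tested with dma_mapping_error() before use. The buffer and length are
 * hypothetical.
 */
static int example_map_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... start the DMA transfer using addr ... */

	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}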
556
557 #ifndef HAVE_ARCH_DMA_SUPPORTED
558 static inline int dma_supported(struct device *dev, u64 mask)
559 {
560 const struct dma_map_ops *ops = get_dma_ops(dev);
561
562 if (!ops)
563 return 0;
564 if (!ops->dma_supported)
565 return 1;
566 return ops->dma_supported(dev, mask);
567 }
568 #endif
569
570 #ifndef HAVE_ARCH_DMA_SET_MASK
571 static inline int dma_set_mask(struct device *dev, u64 mask)
572 {
573 const struct dma_map_ops *ops = get_dma_ops(dev);
574
575 if (ops->set_dma_mask)
576 return ops->set_dma_mask(dev, mask);
577
578 if (!dev->dma_mask || !dma_supported(dev, mask))
579 return -EIO;
580 *dev->dma_mask = mask;
581 return 0;
582 }
583 #endif
584
585 static inline u64 dma_get_mask(struct device *dev)
586 {
587 if (dev && dev->dma_mask && *dev->dma_mask)
588 return *dev->dma_mask;
589 return DMA_BIT_MASK(32);
590 }
591
592 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
593 int dma_set_coherent_mask(struct device *dev, u64 mask);
594 #else
595 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
596 {
597 if (!dma_supported(dev, mask))
598 return -EIO;
599 dev->coherent_dma_mask = mask;
600 return 0;
601 }
602 #endif
603
604 /*
605 * Set both the DMA mask and the coherent DMA mask to the same thing.
606 * Note that we don't check the return value from dma_set_coherent_mask()
607 * as the DMA API guarantees that the coherent DMA mask can be set to
608 * the same or smaller than the streaming DMA mask.
609 */
610 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
611 {
612 int rc = dma_set_mask(dev, mask);
613 if (rc == 0)
614 dma_set_coherent_mask(dev, mask);
615 return rc;
616 }
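/*
 * A common probe-time pattern, shown here only as a sketch: try a 64-bit
 * mask first and fall back to 32 bits. The caller and device are
 * hypothetical.
 */
static int example_set_dma_masks(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;

	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}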
617
618 /*
619 * Similar to the above, except it deals with the case where the device
620 * does not have dev->dma_mask appropriately setup.
621 */
622 static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
623 {
624 dev->dma_mask = &dev->coherent_dma_mask;
625 return dma_set_mask_and_coherent(dev, mask);
626 }
627
628 extern u64 dma_get_required_mask(struct device *dev);
629
630 #ifndef arch_setup_dma_ops
631 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
632 u64 size, const struct iommu_ops *iommu,
633 bool coherent) { }
634 #endif
635
636 #ifndef arch_teardown_dma_ops
637 static inline void arch_teardown_dma_ops(struct device *dev) { }
638 #endif
639
640 static inline unsigned int dma_get_max_seg_size(struct device *dev)
641 {
642 if (dev->dma_parms && dev->dma_parms->max_segment_size)
643 return dev->dma_parms->max_segment_size;
644 return SZ_64K;
645 }
646
647 static inline unsigned int dma_set_max_seg_size(struct device *dev,
648 unsigned int size)
649 {
650 if (dev->dma_parms) {
651 dev->dma_parms->max_segment_size = size;
652 return 0;
653 }
654 return -EIO;
655 }
656
657 static inline unsigned long dma_get_seg_boundary(struct device *dev)
658 {
659 if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
660 return dev->dma_parms->segment_boundary_mask;
661 return DMA_BIT_MASK(32);
662 }
663
664 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
665 {
666 if (dev->dma_parms) {
667 dev->dma_parms->segment_boundary_mask = mask;
668 return 0;
669 }
670 return -EIO;
671 }
672
673 #ifndef dma_max_pfn
674 static inline unsigned long dma_max_pfn(struct device *dev)
675 {
676 return *dev->dma_mask >> PAGE_SHIFT;
677 }
678 #endif
679
680 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
681 dma_addr_t *dma_handle, gfp_t flag)
682 {
683 void *ret = dma_alloc_coherent(dev, size, dma_handle,
684 flag | __GFP_ZERO);
685 return ret;
686 }
687
688 #ifdef CONFIG_HAS_DMA
689 static inline int dma_get_cache_alignment(void)
690 {
691 #ifdef ARCH_DMA_MINALIGN
692 return ARCH_DMA_MINALIGN;
693 #endif
694 return 1;
695 }
696 #endif
697
698 /* flags for the coherent memory api */
699 #define DMA_MEMORY_MAP 0x01
700 #define DMA_MEMORY_IO 0x02
701 #define DMA_MEMORY_INCLUDES_CHILDREN 0x04
702 #define DMA_MEMORY_EXCLUSIVE 0x08
703
704 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
705 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
706 dma_addr_t device_addr, size_t size, int flags);
707 void dma_release_declared_memory(struct device *dev);
708 void *dma_mark_declared_memory_occupied(struct device *dev,
709 dma_addr_t device_addr, size_t size);
710 #else
711 static inline int
712 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
713 dma_addr_t device_addr, size_t size, int flags)
714 {
715 return 0;
716 }
717
718 static inline void
719 dma_release_declared_memory(struct device *dev)
720 {
721 }
722
723 static inline void *
724 dma_mark_declared_memory_occupied(struct device *dev,
725 dma_addr_t device_addr, size_t size)
726 {
727 return ERR_PTR(-EBUSY);
728 }
729 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
730
731 /*
732 * Managed DMA API
733 */
734 extern void *dmam_alloc_coherent(struct device *dev, size_t size,
735 dma_addr_t *dma_handle, gfp_t gfp);
736 extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
737 dma_addr_t dma_handle);
738 extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
739 dma_addr_t *dma_handle, gfp_t gfp);
740 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
741 dma_addr_t dma_handle);
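/*
 * A hedged sketch of the managed ("dmam_") variant declared above: the
 * allocation is tied to the device through devres, so it is released
 * automatically on driver unbind and no explicit free is needed on the
 * success path. The size and surrounding driver state are hypothetical.
 */
static int example_probe_coherent(struct device *dev, size_t size)
{
	dma_addr_t handle;
	void *cpu = dmam_alloc_coherent(dev, size, &handle, GFP_KERNEL);

	if (!cpu)
		return -ENOMEM;

	/* ... store cpu/handle in the driver's private data ... */
	return 0;
}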
742 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
743 extern int dmam_declare_coherent_memory(struct device *dev,
744 phys_addr_t phys_addr,
745 dma_addr_t device_addr, size_t size,
746 int flags);
747 extern void dmam_release_declared_memory(struct device *dev);
748 #else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
749 static inline int dmam_declare_coherent_memory(struct device *dev,
750 phys_addr_t phys_addr, dma_addr_t device_addr,
751 size_t size, gfp_t gfp)
752 {
753 return 0;
754 }
755
756 static inline void dmam_release_declared_memory(struct device *dev)
757 {
758 }
759 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
760
761 static inline void *dma_alloc_wc(struct device *dev, size_t size,
762 dma_addr_t *dma_addr, gfp_t gfp)
763 {
764 return dma_alloc_attrs(dev, size, dma_addr, gfp,
765 DMA_ATTR_WRITE_COMBINE);
766 }
767 #ifndef dma_alloc_writecombine
768 #define dma_alloc_writecombine dma_alloc_wc
769 #endif
770
771 static inline void dma_free_wc(struct device *dev, size_t size,
772 void *cpu_addr, dma_addr_t dma_addr)
773 {
774 return dma_free_attrs(dev, size, cpu_addr, dma_addr,
775 DMA_ATTR_WRITE_COMBINE);
776 }
777 #ifndef dma_free_writecombine
778 #define dma_free_writecombine dma_free_wc
779 #endif
780
781 static inline int dma_mmap_wc(struct device *dev,
782 struct vm_area_struct *vma,
783 void *cpu_addr, dma_addr_t dma_addr,
784 size_t size)
785 {
786 return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
787 DMA_ATTR_WRITE_COMBINE);
788 }
789 #ifndef dma_mmap_writecombine
790 #define dma_mmap_writecombine dma_mmap_wc
791 #endif
792
793 #if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
794 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
795 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
796 #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
797 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
798 #define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
799 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
800 #else
801 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
802 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
803 #define dma_unmap_addr(PTR, ADDR_NAME) (0)
804 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
805 #define dma_unmap_len(PTR, LEN_NAME) (0)
806 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
807 #endif
808
809 #endif
1 #ifndef LINUX_KMEMCHECK_H
2 #define LINUX_KMEMCHECK_H
3
4 #include <linux/mm_types.h>
5 #include <linux/types.h>
6
7 #ifdef CONFIG_KMEMCHECK
8 extern int kmemcheck_enabled;
9
10 /* The slab-related functions. */
11 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
12 void kmemcheck_free_shadow(struct page *page, int order);
13 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
14 size_t size);
15 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
16
17 void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
18 gfp_t gfpflags);
19
20 void kmemcheck_show_pages(struct page *p, unsigned int n);
21 void kmemcheck_hide_pages(struct page *p, unsigned int n);
22
23 bool kmemcheck_page_is_tracked(struct page *p);
24
25 void kmemcheck_mark_unallocated(void *address, unsigned int n);
26 void kmemcheck_mark_uninitialized(void *address, unsigned int n);
27 void kmemcheck_mark_initialized(void *address, unsigned int n);
28 void kmemcheck_mark_freed(void *address, unsigned int n);
29
30 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
31 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
32 void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
33
34 int kmemcheck_show_addr(unsigned long address);
35 int kmemcheck_hide_addr(unsigned long address);
36
37 bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
38
39 /*
40 * Bitfield annotations
41 *
42 * How to use: If you have a struct using bitfields, for example
43 *
44 * struct a {
45 * int x:8, y:8;
46 * };
47 *
48 * then this should be rewritten as
49 *
50 * struct a {
51 * kmemcheck_bitfield_begin(flags);
52 * int x:8, y:8;
53 * kmemcheck_bitfield_end(flags);
54 * };
55 *
56 * Now the "flags_begin" and "flags_end" members may be used to refer to the
57 * beginning and end, respectively, of the bitfield (and things like
58 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
59 * fields should be annotated:
60 *
61 * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
62 * kmemcheck_annotate_bitfield(a, flags);
63 */
64 #define kmemcheck_bitfield_begin(name) \
65 int name##_begin[0];
66
67 #define kmemcheck_bitfield_end(name) \
68 int name##_end[0];
69
70 #define kmemcheck_annotate_bitfield(ptr, name) \
71 do { \
72 int _n; \
73 \
74 if (!ptr) \
75 break; \
76 \
77 _n = (long) &((ptr)->name##_end) \
78 - (long) &((ptr)->name##_begin); \
79 BUILD_BUG_ON(_n < 0); \
80 \
81 kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
82 } while (0)
83
84 #define kmemcheck_annotate_variable(var) \
85 do { \
86 kmemcheck_mark_initialized(&(var), sizeof(var)); \
87 } while (0) \
88
89 #else
90 #define kmemcheck_enabled 0
91
92 static inline void
93 kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
94 {
95 }
96
97 static inline void
98 kmemcheck_free_shadow(struct page *page, int order)
99 {
100 }
101
102 static inline void
103 kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
104 size_t size)
105 {
106 }
107
108 static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
109 size_t size)
110 {
111 }
112
113 static inline void kmemcheck_pagealloc_alloc(struct page *p,
114 unsigned int order, gfp_t gfpflags)
115 {
116 }
117
118 static inline bool kmemcheck_page_is_tracked(struct page *p)
119 {
120 return false;
121 }
122
123 static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
124 {
125 }
126
127 static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
128 {
129 }
130
131 static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
132 {
133 }
134
135 static inline void kmemcheck_mark_freed(void *address, unsigned int n)
136 {
137 }
138
139 static inline void kmemcheck_mark_unallocated_pages(struct page *p,
140 unsigned int n)
141 {
142 }
143
144 static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
145 unsigned int n)
146 {
147 }
148
149 static inline void kmemcheck_mark_initialized_pages(struct page *p,
150 unsigned int n)
151 {
152 }
153
154 static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
155 {
156 return true;
157 }
158
159 #define kmemcheck_bitfield_begin(name)
160 #define kmemcheck_bitfield_end(name)
161 #define kmemcheck_annotate_bitfield(ptr, name) \
162 do { \
163 } while (0)
164
165 #define kmemcheck_annotate_variable(var) \
166 do { \
167 } while (0)
168
169 #endif /* CONFIG_KMEMCHECK */
170
171 #endif /* LINUX_KMEMCHECK_H */
1 #ifndef _LINUX_LIST_H
2 #define _LINUX_LIST_H
3
4 #include <linux/types.h>
5 #include <linux/stddef.h>
6 #include <linux/poison.h>
7 #include <linux/const.h>
8 #include <linux/kernel.h>
9
10 /*
11 * Simple doubly linked list implementation.
12 *
13 * Some of the internal functions ("__xxx") are useful when
14 * manipulating whole lists rather than single entries, as
15 * sometimes we already know the next/prev entries and we can
16 * generate better code by using them directly rather than
17 * using the generic single-entry routines.
18 */
19
20 #define LIST_HEAD_INIT(name) { &(name), &(name) }
21
22 #define LIST_HEAD(name) \
23 struct list_head name = LIST_HEAD_INIT(name)
24
25 static inline void INIT_LIST_HEAD(struct list_head *list)
26 {
27 WRITE_ONCE(list->next, list);
28 list->prev = list;
29 }
30
31 #ifdef CONFIG_DEBUG_LIST
32 extern bool __list_add_valid(struct list_head *new,
33 struct list_head *prev,
34 struct list_head *next);
35 extern bool __list_del_entry_valid(struct list_head *entry);
36 #else
37 static inline bool __list_add_valid(struct list_head *new,
38 struct list_head *prev,
39 struct list_head *next)
40 {
41 return true;
42 }
43 static inline bool __list_del_entry_valid(struct list_head *entry)
44 {
45 return true;
46 }
47 #endif
48
49 /*
50 * Insert a new entry between two known consecutive entries.
51 *
52 * This is only for internal list manipulation where we know
53 * the prev/next entries already!
54 */
55 static inline void __list_add(struct list_head *new,
56 struct list_head *prev,
57 struct list_head *next)
58 {
59 if (!__list_add_valid(new, prev, next))
60 return;
61
62 next->prev = new;
63 new->next = next;
64 new->prev = prev;
65 WRITE_ONCE(prev->next, new);
66 }
67
68 /**
69 * list_add - add a new entry
70 * @new: new entry to be added
71 * @head: list head to add it after
72 *
73 * Insert a new entry after the specified head.
74 * This is good for implementing stacks.
75 */
76 static inline void list_add(struct list_head *new, struct list_head *head)
77 {
78 __list_add(new, head, head->next);
79 }
80
81
82 /**
83 * list_add_tail - add a new entry
84 * @new: new entry to be added
85 * @head: list head to add it before
86 *
87 * Insert a new entry before the specified head.
88 * This is useful for implementing queues.
89 */
90 static inline void list_add_tail(struct list_head *new, struct list_head *head)
91 {
92 __list_add(new, head->prev, head);
93 }
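/*
 * A minimal sketch of embedding a list_head in a structure and queueing
 * entries with list_add_tail(); "struct example_item" and the queue are
 * hypothetical.
 */
struct example_item {
	int value;
	struct list_head node;
};

static LIST_HEAD(example_queue);

static void example_enqueue(struct example_item *it)
{
	/* new entries go before the head, i.e. to the tail of the queue (FIFO) */
	list_add_tail(&it->node, &example_queue);
}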
94
95 /*
96 * Delete a list entry by making the prev/next entries
97 * point to each other.
98 *
99 * This is only for internal list manipulation where we know
100 * the prev/next entries already!
101 */
102 static inline void __list_del(struct list_head * prev, struct list_head * next)
103 {
104 next->prev = prev;
105 WRITE_ONCE(prev->next, next);
106 }
107
108 /**
109 * list_del - deletes entry from list.
110 * @entry: the element to delete from the list.
111 * Note: list_empty() on entry does not return true after this; the entry is
112 * in an undefined state.
113 */
114 static inline void __list_del_entry(struct list_head *entry)
115 {
116 if (!__list_del_entry_valid(entry))
117 return;
118
119 __list_del(entry->prev, entry->next);
120 }
121
122 static inline void list_del(struct list_head *entry)
123 {
124 __list_del_entry(entry);
125 entry->next = LIST_POISON1;
126 entry->prev = LIST_POISON2;
127 }
128
129 /**
130 * list_replace - replace old entry by new one
131 * @old : the element to be replaced
132 * @new : the new element to insert
133 *
134 * If @old was empty, it will be overwritten.
135 */
136 static inline void list_replace(struct list_head *old,
137 struct list_head *new)
138 {
139 new->next = old->next;
140 new->next->prev = new;
141 new->prev = old->prev;
142 new->prev->next = new;
143 }
144
145 static inline void list_replace_init(struct list_head *old,
146 struct list_head *new)
147 {
148 list_replace(old, new);
149 INIT_LIST_HEAD(old);
150 }
151
152 /**
153 * list_del_init - deletes entry from list and reinitialize it.
154 * @entry: the element to delete from the list.
155 */
156 static inline void list_del_init(struct list_head *entry)
157 {
158 __list_del_entry(entry);
159 INIT_LIST_HEAD(entry);
160 }
161
162 /**
163 * list_move - delete from one list and add as another's head
164 * @list: the entry to move
165 * @head: the head that will precede our entry
166 */
167 static inline void list_move(struct list_head *list, struct list_head *head)
168 {
169 __list_del_entry(list);
170 list_add(list, head);
171 }
172
173 /**
174 * list_move_tail - delete from one list and add as another's tail
175 * @list: the entry to move
176 * @head: the head that will follow our entry
177 */
178 static inline void list_move_tail(struct list_head *list,
179 struct list_head *head)
180 {
181 __list_del_entry(list);
182 list_add_tail(list, head);
183 }
184
185 /**
186 * list_is_last - tests whether @list is the last entry in list @head
187 * @list: the entry to test
188 * @head: the head of the list
189 */
190 static inline int list_is_last(const struct list_head *list,
191 const struct list_head *head)
192 {
193 return list->next == head;
194 }
195
196 /**
197 * list_empty - tests whether a list is empty
198 * @head: the list to test.
199 */
200 static inline int list_empty(const struct list_head *head)
201 {
202 return READ_ONCE(head->next) == head;
203 }
204
205 /**
206 * list_empty_careful - tests whether a list is empty and not being modified
207 * @head: the list to test
208 *
209 * Description:
210 * tests whether a list is empty _and_ checks that no other CPU might be
211 * in the process of modifying either member (next or prev)
212 *
213 * NOTE: using list_empty_careful() without synchronization
214 * can only be safe if the only activity that can happen
215 * to the list entry is list_del_init(). Eg. it cannot be used
216 * if another CPU could re-list_add() it.
217 */
218 static inline int list_empty_careful(const struct list_head *head)
219 {
220 struct list_head *next = head->next;
221 return (next == head) && (next == head->prev);
222 }
223
224 /**
225 * list_rotate_left - rotate the list to the left
226 * @head: the head of the list
227 */
228 static inline void list_rotate_left(struct list_head *head)
229 {
230 struct list_head *first;
231
232 if (!list_empty(head)) {
233 first = head->next;
234 list_move_tail(first, head);
235 }
236 }
237
238 /**
239 * list_is_singular - tests whether a list has just one entry.
240 * @head: the list to test.
241 */
242 static inline int list_is_singular(const struct list_head *head)
243 {
244 return !list_empty(head) && (head->next == head->prev);
245 }
246
247 static inline void __list_cut_position(struct list_head *list,
248 struct list_head *head, struct list_head *entry)
249 {
250 struct list_head *new_first = entry->next;
251 list->next = head->next;
252 list->next->prev = list;
253 list->prev = entry;
254 entry->next = list;
255 head->next = new_first;
256 new_first->prev = head;
257 }
258
259 /**
260 * list_cut_position - cut a list into two
261 * @list: a new list to add all removed entries
262 * @head: a list with entries
263 * @entry: an entry within head, could be the head itself
264 * and if so we won't cut the list
265 *
266 * This helper moves the initial part of @head, up to and
267 * including @entry, from @head to @list. You should
268 * pass on @entry an element you know is on @head. @list
269 * should be an empty list or a list you do not care about
270 * losing its data.
271 *
272 */
273 static inline void list_cut_position(struct list_head *list,
274 struct list_head *head, struct list_head *entry)
275 {
276 if (list_empty(head))
277 return;
278 if (list_is_singular(head) &&
279 (head->next != entry && head != entry))
280 return;
281 if (entry == head)
282 INIT_LIST_HEAD(list);
283 else
284 __list_cut_position(list, head, entry);
285 }
286
287 static inline void __list_splice(const struct list_head *list,
288 struct list_head *prev,
289 struct list_head *next)
290 {
291 struct list_head *first = list->next;
292 struct list_head *last = list->prev;
293
294 first->prev = prev;
295 prev->next = first;
296
297 last->next = next;
298 next->prev = last;
299 }
300
301 /**
302 * list_splice - join two lists, this is designed for stacks
303 * @list: the new list to add.
304 * @head: the place to add it in the first list.
305 */
306 static inline void list_splice(const struct list_head *list,
307 struct list_head *head)
308 {
309 if (!list_empty(list))
310 __list_splice(list, head, head->next);
311 }
312
313 /**
314 * list_splice_tail - join two lists, each list being a queue
315 * @list: the new list to add.
316 * @head: the place to add it in the first list.
317 */
318 static inline void list_splice_tail(struct list_head *list,
319 struct list_head *head)
320 {
321 if (!list_empty(list))
322 __list_splice(list, head->prev, head);
323 }
324
325 /**
326 * list_splice_init - join two lists and reinitialise the emptied list.
327 * @list: the new list to add.
328 * @head: the place to add it in the first list.
329 *
330 * The list at @list is reinitialised
331 */
332 static inline void list_splice_init(struct list_head *list,
333 struct list_head *head)
334 {
335 if (!list_empty(list)) {
336 __list_splice(list, head, head->next);
337 INIT_LIST_HEAD(list);
338 }
339 }
340
341 /**
342 * list_splice_tail_init - join two lists and reinitialise the emptied list
343 * @list: the new list to add.
344 * @head: the place to add it in the first list.
345 *
346 * Each of the lists is a queue.
347 * The list at @list is reinitialised
348 */
349 static inline void list_splice_tail_init(struct list_head *list,
350 struct list_head *head)
351 {
352 if (!list_empty(list)) {
353 __list_splice(list, head->prev, head);
354 INIT_LIST_HEAD(list);
355 }
356 }
357
358 /**
359 * list_entry - get the struct for this entry
360 * @ptr: the &struct list_head pointer.
361 * @type: the type of the struct this is embedded in.
362 * @member: the name of the list_head within the struct.
363 */
364 #define list_entry(ptr, type, member) \
365 container_of(ptr, type, member)
366
367 /**
368 * list_first_entry - get the first element from a list
369 * @ptr: the list head to take the element from.
370 * @type: the type of the struct this is embedded in.
371 * @member: the name of the list_head within the struct.
372 *
373 * Note that the list is expected to be non-empty.
374 */
375 #define list_first_entry(ptr, type, member) \
376 list_entry((ptr)->next, type, member)
377
378 /**
379 * list_last_entry - get the last element from a list
380 * @ptr: the list head to take the element from.
381 * @type: the type of the struct this is embedded in.
382 * @member: the name of the list_head within the struct.
383 *
384 * Note that the list is expected to be non-empty.
385 */
386 #define list_last_entry(ptr, type, member) \
387 list_entry((ptr)->prev, type, member)
388
389 /**
390 * list_first_entry_or_null - get the first element from a list
391 * @ptr: the list head to take the element from.
392 * @type: the type of the struct this is embedded in.
393 * @member: the name of the list_head within the struct.
394 *
395 * Note that if the list is empty, it returns NULL.
396 */
397 #define list_first_entry_or_null(ptr, type, member) ({ \
398 struct list_head *head__ = (ptr); \
399 struct list_head *pos__ = READ_ONCE(head__->next); \
400 pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
401 })
402
403 /**
404 * list_next_entry - get the next element in list
405 * @pos: the type * to cursor
406 * @member: the name of the list_head within the struct.
407 */
408 #define list_next_entry(pos, member) \
409 list_entry((pos)->member.next, typeof(*(pos)), member)
410
411 /**
412 * list_prev_entry - get the prev element in list
413 * @pos: the type * to cursor
414 * @member: the name of the list_head within the struct.
415 */
416 #define list_prev_entry(pos, member) \
417 list_entry((pos)->member.prev, typeof(*(pos)), member)
418
419 /**
420 * list_for_each - iterate over a list
421 * @pos: the &struct list_head to use as a loop cursor.
422 * @head: the head for your list.
423 */
424 #define list_for_each(pos, head) \
425 for (pos = (head)->next; pos != (head); pos = pos->next)
426
427 /**
428 * list_for_each_prev - iterate over a list backwards
429 * @pos: the &struct list_head to use as a loop cursor.
430 * @head: the head for your list.
431 */
432 #define list_for_each_prev(pos, head) \
433 for (pos = (head)->prev; pos != (head); pos = pos->prev)
434
435 /**
436 * list_for_each_safe - iterate over a list safe against removal of list entry
437 * @pos: the &struct list_head to use as a loop cursor.
438 * @n: another &struct list_head to use as temporary storage
439 * @head: the head for your list.
440 */
441 #define list_for_each_safe(pos, n, head) \
442 for (pos = (head)->next, n = pos->next; pos != (head); \
443 pos = n, n = pos->next)
444
445 /**
446 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
447 * @pos: the &struct list_head to use as a loop cursor.
448 * @n: another &struct list_head to use as temporary storage
449 * @head: the head for your list.
450 */
451 #define list_for_each_prev_safe(pos, n, head) \
452 for (pos = (head)->prev, n = pos->prev; \
453 pos != (head); \
454 pos = n, n = pos->prev)
455
456 /**
457 * list_for_each_entry - iterate over list of given type
458 * @pos: the type * to use as a loop cursor.
459 * @head: the head for your list.
460 * @member: the name of the list_head within the struct.
461 */
462 #define list_for_each_entry(pos, head, member) \
463 for (pos = list_first_entry(head, typeof(*pos), member); \
464 &pos->member != (head); \
465 pos = list_next_entry(pos, member))
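/*
 * An iteration sketch over the hypothetical example_queue introduced above.
 * Entries must not be removed inside this loop; see the _safe variant
 * further down.
 */
static int example_sum(void)
{
	struct example_item *it;
	int sum = 0;

	list_for_each_entry(it, &example_queue, node)
		sum += it->value;

	return sum;
}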
466
467 /**
468 * list_for_each_entry_reverse - iterate backwards over list of given type.
469 * @pos: the type * to use as a loop cursor.
470 * @head: the head for your list.
471 * @member: the name of the list_head within the struct.
472 */
473 #define list_for_each_entry_reverse(pos, head, member) \
474 for (pos = list_last_entry(head, typeof(*pos), member); \
475 &pos->member != (head); \
476 pos = list_prev_entry(pos, member))
477
478 /**
479 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
480 * @pos: the type * to use as a start point
481 * @head: the head of the list
482 * @member: the name of the list_head within the struct.
483 *
484 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
485 */
486 #define list_prepare_entry(pos, head, member) \
487 ((pos) ? : list_entry(head, typeof(*pos), member))
488
489 /**
490 * list_for_each_entry_continue - continue iteration over list of given type
491 * @pos: the type * to use as a loop cursor.
492 * @head: the head for your list.
493 * @member: the name of the list_head within the struct.
494 *
495 * Continue to iterate over list of given type, continuing after
496 * the current position.
497 */
498 #define list_for_each_entry_continue(pos, head, member) \
499 for (pos = list_next_entry(pos, member); \
500 &pos->member != (head); \
501 pos = list_next_entry(pos, member))
502
503 /**
504 * list_for_each_entry_continue_reverse - iterate backwards from the given point
505 * @pos: the type * to use as a loop cursor.
506 * @head: the head for your list.
507 * @member: the name of the list_head within the struct.
508 *
509 * Start to iterate over list of given type backwards, continuing after
510 * the current position.
511 */
512 #define list_for_each_entry_continue_reverse(pos, head, member) \
513 for (pos = list_prev_entry(pos, member); \
514 &pos->member != (head); \
515 pos = list_prev_entry(pos, member))
516
517 /**
518 * list_for_each_entry_from - iterate over list of given type from the current point
519 * @pos: the type * to use as a loop cursor.
520 * @head: the head for your list.
521 * @member: the name of the list_head within the struct.
522 *
523 * Iterate over list of given type, continuing from current position.
524 */
525 #define list_for_each_entry_from(pos, head, member) \
526 for (; &pos->member != (head); \
527 pos = list_next_entry(pos, member))
528
529 /**
530 * list_for_each_entry_from_reverse - iterate backwards over list of given type
531 * from the current point
532 * @pos: the type * to use as a loop cursor.
533 * @head: the head for your list.
534 * @member: the name of the list_head within the struct.
535 *
536 * Iterate backwards over list of given type, continuing from current position.
537 */
538 #define list_for_each_entry_from_reverse(pos, head, member) \
539 for (; &pos->member != (head); \
540 pos = list_prev_entry(pos, member))
541
542 /**
543 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
544 * @pos: the type * to use as a loop cursor.
545 * @n: another type * to use as temporary storage
546 * @head: the head for your list.
547 * @member: the name of the list_head within the struct.
548 */
549 #define list_for_each_entry_safe(pos, n, head, member) \
550 for (pos = list_first_entry(head, typeof(*pos), member), \
551 n = list_next_entry(pos, member); \
552 &pos->member != (head); \
553 pos = n, n = list_next_entry(n, member))
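/*
 * A removal-during-iteration sketch: the temporary cursor makes list_del()
 * on the current entry safe. kfree() is assumed from <linux/slab.h>; the
 * queue and item type are the hypothetical ones introduced earlier.
 */
static void example_drain(void)
{
	struct example_item *it, *tmp;

	list_for_each_entry_safe(it, tmp, &example_queue, node) {
		list_del(&it->node);
		kfree(it);
	}
}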
554
555 /**
556 * list_for_each_entry_safe_continue - continue list iteration safe against removal
557 * @pos: the type * to use as a loop cursor.
558 * @n: another type * to use as temporary storage
559 * @head: the head for your list.
560 * @member: the name of the list_head within the struct.
561 *
562 * Iterate over list of given type, continuing after current point,
563 * safe against removal of list entry.
564 */
565 #define list_for_each_entry_safe_continue(pos, n, head, member) \
566 for (pos = list_next_entry(pos, member), \
567 n = list_next_entry(pos, member); \
568 &pos->member != (head); \
569 pos = n, n = list_next_entry(n, member))
570
571 /**
572 * list_for_each_entry_safe_from - iterate over list from current point safe against removal
573 * @pos: the type * to use as a loop cursor.
574 * @n: another type * to use as temporary storage
575 * @head: the head for your list.
576 * @member: the name of the list_head within the struct.
577 *
578 * Iterate over list of given type from current point, safe against
579 * removal of list entry.
580 */
581 #define list_for_each_entry_safe_from(pos, n, head, member) \
582 for (n = list_next_entry(pos, member); \
583 &pos->member != (head); \
584 pos = n, n = list_next_entry(n, member))
585
586 /**
587 * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
588 * @pos: the type * to use as a loop cursor.
589 * @n: another type * to use as temporary storage
590 * @head: the head for your list.
591 * @member: the name of the list_head within the struct.
592 *
593 * Iterate backwards over list of given type, safe against removal
594 * of list entry.
595 */
596 #define list_for_each_entry_safe_reverse(pos, n, head, member) \
597 for (pos = list_last_entry(head, typeof(*pos), member), \
598 n = list_prev_entry(pos, member); \
599 &pos->member != (head); \
600 pos = n, n = list_prev_entry(n, member))
601
602 /**
603 * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
604 * @pos: the loop cursor used in the list_for_each_entry_safe loop
605 * @n: temporary storage used in list_for_each_entry_safe
606 * @member: the name of the list_head within the struct.
607 *
608 * list_safe_reset_next is not safe to use in general if the list may be
609 * modified concurrently (eg. the lock is dropped in the loop body). An
610 * exception to this is if the cursor element (pos) is pinned in the list,
611 * and list_safe_reset_next is called after re-taking the lock and before
612 * completing the current iteration of the loop body.
613 */
614 #define list_safe_reset_next(pos, n, member) \
615 n = list_next_entry(pos, member)
616
617 /*
618 * Doubly linked lists with a single pointer list head.
619 * Mostly useful for hash tables where the two-pointer list head is
620 * too wasteful.
621 * You lose the ability to access the tail in O(1).
622 */
623
624 #define HLIST_HEAD_INIT { .first = NULL }
625 #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
626 #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
627 static inline void INIT_HLIST_NODE(struct hlist_node *h)
628 {
629 h->next = NULL;
630 h->pprev = NULL;
631 }
632
633 static inline int hlist_unhashed(const struct hlist_node *h)
634 {
635 return !h->pprev;
636 }
637
638 static inline int hlist_empty(const struct hlist_head *h)
639 {
640 return !READ_ONCE(h->first);
641 }
642
643 static inline void __hlist_del(struct hlist_node *n)
644 {
645 struct hlist_node *next = n->next;
646 struct hlist_node **pprev = n->pprev;
647
648 WRITE_ONCE(*pprev, next);
649 if (next)
650 next->pprev = pprev;
651 }
652
653 static inline void hlist_del(struct hlist_node *n)
654 {
655 __hlist_del(n);
656 n->next = LIST_POISON1;
657 n->pprev = LIST_POISON2;
658 }
659
660 static inline void hlist_del_init(struct hlist_node *n)
661 {
662 if (!hlist_unhashed(n)) {
663 __hlist_del(n);
664 INIT_HLIST_NODE(n);
665 }
666 }
667
668 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
669 {
670 struct hlist_node *first = h->first;
671 n->next = first;
672 if (first)
673 first->pprev = &n->next;
674 WRITE_ONCE(h->first, n);
675 n->pprev = &h->first;
676 }
677
678 /* next must be != NULL */
679 static inline void hlist_add_before(struct hlist_node *n,
680 struct hlist_node *next)
681 {
682 n->pprev = next->pprev;
683 n->next = next;
684 next->pprev = &n->next;
685 WRITE_ONCE(*(n->pprev), n);
686 }
687
688 static inline void hlist_add_behind(struct hlist_node *n,
689 struct hlist_node *prev)
690 {
691 n->next = prev->next;
692 WRITE_ONCE(prev->next, n);
693 n->pprev = &prev->next;
694
695 if (n->next)
696 n->next->pprev = &n->next;
697 }
698
699 /* after that we'll appear to be on some hlist and hlist_del will work */
700 static inline void hlist_add_fake(struct hlist_node *n)
701 {
702 n->pprev = &n->next;
703 }
704
705 static inline bool hlist_fake(struct hlist_node *h)
706 {
707 return h->pprev == &h->next;
708 }
709
710 /*
711 * Check whether the node is the only node of the head without
712 * accessing head:
713 */
714 static inline bool
715 hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
716 {
717 return !n->next && n->pprev == &h->first;
718 }
719
720 /*
721 * Move a list from one list head to another. Fixup the pprev
722 * reference of the first entry if it exists.
723 */
724 static inline void hlist_move_list(struct hlist_head *old,
725 struct hlist_head *new)
726 {
727 new->first = old->first;
728 if (new->first)
729 new->first->pprev = &new->first;
730 old->first = NULL;
731 }
732
733 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
734
735 #define hlist_for_each(pos, head) \
736 for (pos = (head)->first; pos ; pos = pos->next)
737
738 #define hlist_for_each_safe(pos, n, head) \
739 for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
740 pos = n)
741
742 #define hlist_entry_safe(ptr, type, member) \
743 ({ typeof(ptr) ____ptr = (ptr); \
744 ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
745 })
746
747 /**
748 * hlist_for_each_entry - iterate over list of given type
749 * @pos: the type * to use as a loop cursor.
750 * @head: the head for your list.
751 * @member: the name of the hlist_node within the struct.
752 */
753 #define hlist_for_each_entry(pos, head, member) \
754 for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
755 pos; \
756 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
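/*
 * A sketch of the hash-bucket usage hlist is intended for (see the comment
 * at the top of this hlist section); the table size, object type and the
 * simple modulo hash are hypothetical.
 */
#define EXAMPLE_HASH_SIZE 16
static struct hlist_head example_table[EXAMPLE_HASH_SIZE];

struct example_obj {
	unsigned long key;
	struct hlist_node hash;
};

static void example_insert(struct example_obj *obj)
{
	hlist_add_head(&obj->hash, &example_table[obj->key % EXAMPLE_HASH_SIZE]);
}

static struct example_obj *example_lookup(unsigned long key)
{
	struct example_obj *obj;

	hlist_for_each_entry(obj, &example_table[key % EXAMPLE_HASH_SIZE], hash)
		if (obj->key == key)
			return obj;

	return NULL;
}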
757
758 /**
759 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
760 * @pos: the type * to use as a loop cursor.
761 * @member: the name of the hlist_node within the struct.
762 */
763 #define hlist_for_each_entry_continue(pos, member) \
764 for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
765 pos; \
766 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
767
768 /**
769 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
770 * @pos: the type * to use as a loop cursor.
771 * @member: the name of the hlist_node within the struct.
772 */
773 #define hlist_for_each_entry_from(pos, member) \
774 for (; pos; \
775 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
776
777 /**
778 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
779 * @pos: the type * to use as a loop cursor.
780 * @n: another &struct hlist_node to use as temporary storage
781 * @head: the head for your list.
782 * @member: the name of the hlist_node within the struct.
783 */
784 #define hlist_for_each_entry_safe(pos, n, head, member) \
785 for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
786 pos && ({ n = pos->member.next; 1; }); \
787 pos = hlist_entry_safe(n, typeof(*pos), member))
788
789 #endif
1 /* include this file if the platform implements the dma_ DMA Mapping API
2 * and wants to provide the pci_ DMA Mapping API in terms of it */
3
4 #ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
5 #define _ASM_GENERIC_PCI_DMA_COMPAT_H
6
7 #include <linux/dma-mapping.h>
8
9 /* This defines the direction arg to the DMA mapping routines. */
10 #define PCI_DMA_BIDIRECTIONAL 0
11 #define PCI_DMA_TODEVICE 1
12 #define PCI_DMA_FROMDEVICE 2
13 #define PCI_DMA_NONE 3
14
15 static inline void *
16 pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
17 dma_addr_t *dma_handle)
18 {
19 return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
20 }
21
22 static inline void *
23 pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
24 dma_addr_t *dma_handle)
25 {
26 return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
27 size, dma_handle, GFP_ATOMIC);
28 }
29
30 static inline void
31 pci_free_consistent(struct pci_dev *hwdev, size_t size,
32 void *vaddr, dma_addr_t dma_handle)
33 {
34 dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
35 }
36
37 static inline dma_addr_t
38 pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
39 {
40 return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
41 }
42
43 static inline void
44 pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
45 size_t size, int direction)
46 {
47 dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
48 }
49
50 static inline dma_addr_t
51 pci_map_page(struct pci_dev *hwdev, struct page *page,
52 unsigned long offset, size_t size, int direction)
53 {
54 return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
55 }
56
57 static inline void
58 pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
59 size_t size, int direction)
60 {
61 dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
62 }
63
64 static inline int
65 pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
66 int nents, int direction)
67 {
68 return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
69 }
70
71 static inline void
72 pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
73 int nents, int direction)
74 {
75 dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
76 }
77
78 static inline void
79 pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
80 size_t size, int direction)
81 {
82 dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
83 }
84
85 static inline void
86 pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
87 size_t size, int direction)
88 {
89 dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
90 }
91
92 static inline void
93 pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
94 int nelems, int direction)
95 {
96 dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
97 }
98
99 static inline void
100 pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
101 int nelems, int direction)
102 {
103 dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
104 }
105
106 static inline int
107 pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
108 {
109 return dma_mapping_error(&pdev->dev, dma_addr);
110 }
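/*
 * A hedged sketch of the legacy pci_ wrappers above, which simply forward to
 * the dma_ API on &pdev->dev; the buffer and length are hypothetical.
 */
static int example_pci_map(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(pdev, addr))
		return -ENOMEM;

	/* ... run the DMA transfer ... */

	pci_unmap_single(pdev, addr, len, PCI_DMA_TODEVICE);
	return 0;
}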
111
112 #ifdef CONFIG_PCI
113 static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
114 {
115 return dma_set_mask(&dev->dev, mask);
116 }
117
118 static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
119 {
120 return dma_set_coherent_mask(&dev->dev, mask);
121 }
122
123 static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
124 unsigned int size)
125 {
126 return dma_set_max_seg_size(&dev->dev, size);
127 }
128
129 static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
130 unsigned long mask)
131 {
132 return dma_set_seg_boundary(&dev->dev, mask);
133 }
134 #else
135 static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
136 { return -EIO; }
137 static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
138 { return -EIO; }
139 static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
140 unsigned int size)
141 { return -EIO; }
142 static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
143 unsigned long mask)
144 { return -EIO; }
145 #endif
146
147 #endif
1 #ifndef __LINUX_SPINLOCK_H
2 #define __LINUX_SPINLOCK_H
3
4 /*
5 * include/linux/spinlock.h - generic spinlock/rwlock declarations
6 *
7 * here's the role of the various spinlock/rwlock related include files:
8 *
9 * on SMP builds:
10 *
11 * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
12 * initializers
13 *
14 * linux/spinlock_types.h:
15 * defines the generic type and initializers
16 *
17 * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
18 * implementations, mostly inline assembly code
19 *
20 * (also included on UP-debug builds:)
21 *
22 * linux/spinlock_api_smp.h:
23 * contains the prototypes for the _spin_*() APIs.
24 *
25 * linux/spinlock.h: builds the final spin_*() APIs.
26 *
27 * on UP builds:
28 *
29 * linux/spinlock_type_up.h:
30 * contains the generic, simplified UP spinlock type.
31 * (which is an empty structure on non-debug builds)
32 *
33 * linux/spinlock_types.h:
34 * defines the generic type and initializers
35 *
36 * linux/spinlock_up.h:
37 * contains the arch_spin_*()/etc. version of UP
38 * builds. (which are NOPs on non-debug, non-preempt
39 * builds)
40 *
41 * (included on UP-non-debug builds:)
42 *
43 * linux/spinlock_api_up.h:
44 * builds the _spin_*() APIs.
45 *
46 * linux/spinlock.h: builds the final spin_*() APIs.
47 */
48
49 #include <linux/typecheck.h>
50 #include <linux/preempt.h>
51 #include <linux/linkage.h>
52 #include <linux/compiler.h>
53 #include <linux/irqflags.h>
54 #include <linux/thread_info.h>
55 #include <linux/kernel.h>
56 #include <linux/stringify.h>
57 #include <linux/bottom_half.h>
58 #include <asm/barrier.h>
59
60
61 /*
62 * Must define these before including other files, inline functions need them
63 */
64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
65
66 #define LOCK_SECTION_START(extra) \
67 ".subsection 1\n\t" \
68 extra \
69 ".ifndef " LOCK_SECTION_NAME "\n\t" \
70 LOCK_SECTION_NAME ":\n\t" \
71 ".endif\n"
72
73 #define LOCK_SECTION_END \
74 ".previous\n\t"
75
76 #define __lockfunc __attribute__((section(".spinlock.text")))
77
78 /*
79 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
80 */
81 #include <linux/spinlock_types.h>
82
83 /*
84 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
85 */
86 #ifdef CONFIG_SMP
87 # include <asm/spinlock.h>
88 #else
89 # include <linux/spinlock_up.h>
90 #endif
91
92 #ifdef CONFIG_DEBUG_SPINLOCK
93 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
94 struct lock_class_key *key);
95 # define raw_spin_lock_init(lock) \
96 do { \
97 static struct lock_class_key __key; \
98 \
99 __raw_spin_lock_init((lock), #lock, &__key); \
100 } while (0)
101
102 #else
103 # define raw_spin_lock_init(lock) \
104 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
105 #endif
106
107 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
108
109 #ifdef CONFIG_GENERIC_LOCKBREAK
110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
111 #else
112
113 #ifdef arch_spin_is_contended
114 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
115 #else
116 #define raw_spin_is_contended(lock) (((void)(lock), 0))
117 #endif /*arch_spin_is_contended*/
118 #endif
119
120 /*
121 * Despite its name it doesn't necessarily have to be a full barrier.
122 * It should only guarantee that a STORE before the critical section
123 * cannot be reordered with LOADs and STOREs inside this section.
124 * spin_lock() is a one-way barrier: a LOAD cannot escape out
125 * of the region. So the default implementation simply ensures that
126 * a STORE cannot move into the critical section; smp_wmb() should
127 * serialize it with another STORE done by spin_lock().
128 */
129 #ifndef smp_mb__before_spinlock
130 #define smp_mb__before_spinlock() smp_wmb()
131 #endif
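For illustration only (this sketch is not part of the header above): the intended pattern is a STORE published before the critical section, ordered by smp_mb__before_spinlock() against the LOADs and STOREs done under the lock. The lock, flag and function names below are hypothetical.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */
static int pending;			/* hypothetical flag */
static int consumed;

static void demo_publish(void)
{
	WRITE_ONCE(pending, 1);		/* STORE before the critical section */

	smp_mb__before_spinlock();	/* keep the STORE ordered against the section */
	spin_lock(&demo_lock);
	consumed = READ_ONCE(pending);	/* LOAD inside the critical section */
	spin_unlock(&demo_lock);
}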
132
133 /**
134 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
135 * @lock: the spinlock in question.
136 */
137 #define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
138
139 #ifdef CONFIG_DEBUG_SPINLOCK
140 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
141 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
142 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
143 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
144 #else
145 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
146 {
147 __acquire(lock);
148 arch_spin_lock(&lock->raw_lock);
149 }
150
151 static inline void
152 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
153 {
154 __acquire(lock);
155 arch_spin_lock_flags(&lock->raw_lock, *flags);
156 }
157
158 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
159 {
160 return arch_spin_trylock(&(lock)->raw_lock);
161 }
162
163 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
164 {
165 arch_spin_unlock(&lock->raw_lock);
166 __release(lock);
167 }
168 #endif
169
170 /*
171 * Define the various spin_lock methods. Note we define these
172 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
173 * various methods are defined as nops in the case they are not
174 * required.
175 */
176 #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
177
178 #define raw_spin_lock(lock) _raw_spin_lock(lock)
179
180 #ifdef CONFIG_DEBUG_LOCK_ALLOC
181 # define raw_spin_lock_nested(lock, subclass) \
182 _raw_spin_lock_nested(lock, subclass)
183
184 # define raw_spin_lock_nest_lock(lock, nest_lock) \
185 do { \
186 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
187 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
188 } while (0)
189 #else
190 /*
191 * Always evaluate the 'subclass' argument to avoid that the compiler
192 * warns about set-but-not-used variables when building with
193 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
194 */
195 # define raw_spin_lock_nested(lock, subclass) \
196 _raw_spin_lock(((void)(subclass), (lock)))
197 # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
198 #endif
199
200 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
201
202 #define raw_spin_lock_irqsave(lock, flags) \
203 do { \
204 typecheck(unsigned long, flags); \
205 flags = _raw_spin_lock_irqsave(lock); \
206 } while (0)
207
208 #ifdef CONFIG_DEBUG_LOCK_ALLOC
209 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
210 do { \
211 typecheck(unsigned long, flags); \
212 flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
213 } while (0)
214 #else
215 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
216 do { \
217 typecheck(unsigned long, flags); \
218 flags = _raw_spin_lock_irqsave(lock); \
219 } while (0)
220 #endif
221
222 #else
223
224 #define raw_spin_lock_irqsave(lock, flags) \
225 do { \
226 typecheck(unsigned long, flags); \
227 _raw_spin_lock_irqsave(lock, flags); \
228 } while (0)
229
230 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
231 raw_spin_lock_irqsave(lock, flags)
232
233 #endif
234
235 #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
236 #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
237 #define raw_spin_unlock(lock) _raw_spin_unlock(lock)
238 #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
239
240 #define raw_spin_unlock_irqrestore(lock, flags) \
241 do { \
242 typecheck(unsigned long, flags); \
243 _raw_spin_unlock_irqrestore(lock, flags); \
244 } while (0)
245 #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
246
247 #define raw_spin_trylock_bh(lock) \
248 __cond_lock(lock, _raw_spin_trylock_bh(lock))
249
250 #define raw_spin_trylock_irq(lock) \
251 ({ \
252 local_irq_disable(); \
253 raw_spin_trylock(lock) ? \
254 1 : ({ local_irq_enable(); 0; }); \
255 })
256
257 #define raw_spin_trylock_irqsave(lock, flags) \
258 ({ \
259 local_irq_save(flags); \
260 raw_spin_trylock(lock) ? \
261 1 : ({ local_irq_restore(flags); 0; }); \
262 })
263
264 /**
265 * raw_spin_can_lock - would raw_spin_trylock() succeed?
266 * @lock: the spinlock in question.
267 */
268 #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
269
270 /* Include rwlock functions */
271 #include <linux/rwlock.h>
272
273 /*
274 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
275 */
276 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
277 # include <linux/spinlock_api_smp.h>
278 #else
279 # include <linux/spinlock_api_up.h>
280 #endif
281
282 /*
283 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
284 */
285
286 static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
287 {
288 return &lock->rlock;
289 }
290
291 #define spin_lock_init(_lock) \
292 do { \
293 spinlock_check(_lock); \
294 raw_spin_lock_init(&(_lock)->rlock); \
295 } while (0)
296
297 static __always_inline void spin_lock(spinlock_t *lock)
298 {
299 raw_spin_lock(&lock->rlock);
300 }
301
302 static __always_inline void spin_lock_bh(spinlock_t *lock)
303 {
304 raw_spin_lock_bh(&lock->rlock);
305 }
306
307 static __always_inline int spin_trylock(spinlock_t *lock)
308 {
309 return raw_spin_trylock(&lock->rlock);
310 }
311
312 #define spin_lock_nested(lock, subclass) \
313 do { \
314 raw_spin_lock_nested(spinlock_check(lock), subclass); \
315 } while (0)
316
317 #define spin_lock_nest_lock(lock, nest_lock) \
318 do { \
319 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
320 } while (0)
321
322 static __always_inline void spin_lock_irq(spinlock_t *lock)
323 {
324 raw_spin_lock_irq(&lock->rlock);
325 }
326
327 #define spin_lock_irqsave(lock, flags) \
328 do { \
329 raw_spin_lock_irqsave(spinlock_check(lock), flags); \
330 } while (0)
331
332 #define spin_lock_irqsave_nested(lock, flags, subclass) \
333 do { \
334 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
335 } while (0)
336
337 static __always_inline void spin_unlock(spinlock_t *lock)
338 {
339 raw_spin_unlock(&lock->rlock);
340 }
341
342 static __always_inline void spin_unlock_bh(spinlock_t *lock)
343 {
344 raw_spin_unlock_bh(&lock->rlock);
345 }
346
347 static __always_inline void spin_unlock_irq(spinlock_t *lock)
348 {
349 raw_spin_unlock_irq(&lock->rlock);
350 }
351
352 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
353 {
354 raw_spin_unlock_irqrestore(&lock->rlock, flags);
355 }
356
357 static __always_inline int spin_trylock_bh(spinlock_t *lock)
358 {
359 return raw_spin_trylock_bh(&lock->rlock);
360 }
361
362 static __always_inline int spin_trylock_irq(spinlock_t *lock)
363 {
364 return raw_spin_trylock_irq(&lock->rlock);
365 }
366
367 #define spin_trylock_irqsave(lock, flags) \
368 ({ \
369 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
370 })
371
372 static __always_inline void spin_unlock_wait(spinlock_t *lock)
373 {
374 raw_spin_unlock_wait(&lock->rlock);
375 }
376
377 static __always_inline int spin_is_locked(spinlock_t *lock)
378 {
379 return raw_spin_is_locked(&lock->rlock);
380 }
381
382 static __always_inline int spin_is_contended(spinlock_t *lock)
383 {
384 return raw_spin_is_contended(&lock->rlock);
385 }
386
387 static __always_inline int spin_can_lock(spinlock_t *lock)
388 {
389 return raw_spin_can_lock(&lock->rlock);
390 }
391
392 #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
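As a usage illustration of the spinlock_t wrappers defined above (not part of the header), here is a minimal sketch; the structure and field names are hypothetical.

#include <linux/spinlock.h>

/* Hypothetical driver state protected by a spinlock_t. */
struct demo_dev {
	spinlock_t lock;
	unsigned int count;
};

static void demo_init(struct demo_dev *dev)
{
	spin_lock_init(&dev->lock);	/* expands to raw_spin_lock_init() */
	dev->count = 0;
}

/* Process context: plain lock/unlock. */
static void demo_inc(struct demo_dev *dev)
{
	spin_lock(&dev->lock);
	dev->count++;
	spin_unlock(&dev->lock);
}

/* Context that may race with an interrupt handler: save/restore IRQ flags. */
static unsigned int demo_read(struct demo_dev *dev)
{
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(&dev->lock, flags);
	val = dev->count;
	spin_unlock_irqrestore(&dev->lock, flags);

	return val;
}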
393
394 /*
395 * Pull the atomic_t declaration:
396 * (asm-mips/atomic.h needs above definitions)
397 */
398 #include <linux/atomic.h>
399 /**
400 * atomic_dec_and_lock - lock on reaching reference count zero
401 * @atomic: the atomic counter
402 * @lock: the spinlock in question
403 *
404 * Decrements @atomic by 1. If the result is 0, returns true and locks
405 * @lock. Returns false for all other cases.
406 */
407 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
408 #define atomic_dec_and_lock(atomic, lock) \
409 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
410
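A typical reference-release pattern built on atomic_dec_and_lock(), shown as an illustrative sketch only; the object and list names are hypothetical.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical refcounted object kept on a global list. */
struct demo_obj {
	atomic_t refcount;
	struct list_head node;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_list_lock);

static void demo_obj_put(struct demo_obj *obj)
{
	/*
	 * Only the caller dropping the last reference takes the lock;
	 * atomic_dec_and_lock() returns true with demo_list_lock held
	 * in that case.
	 */
	if (atomic_dec_and_lock(&obj->refcount, &demo_list_lock)) {
		list_del(&obj->node);
		spin_unlock(&demo_list_lock);
		kfree(obj);
	}
}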
411 #endif /* __LINUX_SPINLOCK_H */
1 #ifndef _SCSI_SCSI_CMND_H
2 #define _SCSI_SCSI_CMND_H
3
4 #include <linux/dma-mapping.h>
5 #include <linux/blkdev.h>
6 #include <linux/list.h>
7 #include <linux/types.h>
8 #include <linux/timer.h>
9 #include <linux/scatterlist.h>
10 #include <scsi/scsi_device.h>
11 #include <scsi/scsi_request.h>
12
13 struct Scsi_Host;
14 struct scsi_driver;
15
16 #include <scsi/scsi_device.h>
17
18 /*
19 * MAX_COMMAND_SIZE is:
20 * The longest fixed-length SCSI CDB as per the SCSI standard.
21 * fixed-length means: commands whose size can be determined
22 * by their opcode and whose CDB does not carry a length specifier (unlike
23 * the VARIABLE_LENGTH_CMD(0x7f) command). This is actually not exactly
24 * true, as the SCSI standard also defines extended commands and
25 * vendor-specific commands that can be bigger than 16 bytes. The kernel
26 * will support these using the same infrastructure used for VARLEN CDBs.
27 * So in effect MAX_COMMAND_SIZE means the maximum command size that scsi-ml
28 * supports without a cmd_len being specified by ULDs.
29 */
30 #define MAX_COMMAND_SIZE 16
31 #if (MAX_COMMAND_SIZE > BLK_MAX_CDB)
32 # error MAX_COMMAND_SIZE can not be bigger than BLK_MAX_CDB
33 #endif
34
35 struct scsi_data_buffer {
36 struct sg_table table;
37 unsigned length;
38 int resid;
39 };
40
41 /* embedded in scsi_cmnd */
42 struct scsi_pointer {
43 char *ptr; /* data pointer */
44 int this_residual; /* left in this buffer */
45 struct scatterlist *buffer; /* which buffer */
46 int buffers_residual; /* how many buffers left */
47
48 dma_addr_t dma_handle;
49
50 volatile int Status;
51 volatile int Message;
52 volatile int have_data_in;
53 volatile int sent_command;
54 volatile int phase;
55 };
56
57 /* for scmd->flags */
58 #define SCMD_TAGGED (1 << 0)
59
60 struct scsi_cmnd {
61 struct scsi_request req;
62 struct scsi_device *device;
63 struct list_head list; /* scsi_cmnd participates in queue lists */
64 struct list_head eh_entry; /* entry for the host eh_cmd_q */
65 struct delayed_work abort_work;
66 int eh_eflags; /* Used by error handler */
67
68 /*
69 * A SCSI Command is assigned a nonzero serial_number before being passed
70 * to the driver's queue command function. The serial_number is
71 * cleared when scsi_done is entered indicating that the command
72 * has been completed. It is a bug for LLDDs to use this number
73 * for purposes other than printk (and even that is only useful
74 * for debugging).
75 */
76 unsigned long serial_number;
77
78 /*
79 * This is set to jiffies as it was when the command was first
80 * allocated. It is used to time how long the command has
81 * been outstanding
82 */
83 unsigned long jiffies_at_alloc;
84
85 int retries;
86 int allowed;
87
88 unsigned char prot_op;
89 unsigned char prot_type;
90 unsigned char prot_flags;
91
92 unsigned short cmd_len;
93 enum dma_data_direction sc_data_direction;
94
95 /* These elements define the operation we are about to perform */
96 unsigned char *cmnd;
97
98
99 /* These elements define the operation we ultimately want to perform */
100 struct scsi_data_buffer sdb;
101 struct scsi_data_buffer *prot_sdb;
102
103 unsigned underflow; /* Return error if less than
104 this amount is transferred */
105
106 unsigned transfersize; /* How much we are guaranteed to
107 transfer with each SCSI transfer
108 (i.e., between disconnect /
109 reconnects). Probably == sector
110 size. */
111
112 struct request *request; /* The command we are
113 working on */
114
115 #define SCSI_SENSE_BUFFERSIZE 96
116 unsigned char *sense_buffer;
117 /* obtained by REQUEST SENSE when
118 * CHECK CONDITION is received on original
119 * command (auto-sense) */
120
121 /* Low-level done function - can be used by low-level driver to point
122 * to completion function. Not used by mid/upper level code. */
123 void (*scsi_done) (struct scsi_cmnd *);
124
125 /*
126 * The following fields can be written to by the host specific code.
127 * Everything else should be left alone.
128 */
129 struct scsi_pointer SCp; /* Scratchpad used by some host adapters */
130
131 unsigned char *host_scribble; /* The host adapter is allowed to
132 * call scsi_malloc and get some memory
133 * and hang it here. The host adapter
134 * is also expected to call scsi_free
135 * to release this memory. (The memory
136 * obtained by scsi_malloc is guaranteed
137 * to be at an address < 16Mb). */
138
139 int result; /* Status code from lower level driver */
140 int flags; /* Command flags */
141
142 unsigned char tag; /* SCSI-II queued command tag */
143 };
144
145 /*
146 * Return the driver private allocation behind the command.
147 * Only works if cmd_size is set in the host template.
148 */
149 static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
150 {
151 return cmd + 1;
152 }
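For illustration only: how a low-level driver might use scsi_cmd_priv() when it sets cmd_size in its scsi_host_template; the structure and field names below are hypothetical.

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Hypothetical per-command bookkeeping of a low-level driver. */
struct demo_cmd_priv {
	u32 tag;
	bool aborted;
};

/*
 * The (hypothetical) host template would set
 *	.cmd_size = sizeof(struct demo_cmd_priv),
 * so that scsi-ml allocates this extra space right behind each scsi_cmnd.
 */
static void demo_prepare_cmd(struct scsi_cmnd *cmd, u32 tag)
{
	struct demo_cmd_priv *priv = scsi_cmd_priv(cmd);

	priv->tag = tag;
	priv->aborted = false;
}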
153
154 /* make sure not to use it with passthrough commands */
155 static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
156 {
157 return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
158 }
159
160 extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
161 extern void scsi_put_command(struct scsi_cmnd *);
162 extern void scsi_finish_command(struct scsi_cmnd *cmd);
163
164 extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
165 size_t *offset, size_t *len);
166 extern void scsi_kunmap_atomic_sg(void *virt);
167
168 extern int scsi_init_io(struct scsi_cmnd *cmd);
169
170 extern int scsi_dma_map(struct scsi_cmnd *cmd);
171 extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
172
173 static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd)
174 {
175 return cmd->sdb.table.nents;
176 }
177
178 static inline struct scatterlist *scsi_sglist(struct scsi_cmnd *cmd)
179 {
180 return cmd->sdb.table.sgl;
181 }
182
183 static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
184 {
185 return cmd->sdb.length;
186 }
187
188 static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
189 {
190 cmd->sdb.resid = resid;
191 }
192
193 static inline int scsi_get_resid(struct scsi_cmnd *cmd)
194 {
195 return cmd->sdb.resid;
196 }
197
198 #define scsi_for_each_sg(cmd, sg, nseg, __i) \
199 for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
200
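A sketch of how a low-level driver might walk the command's scatterlist with the helpers above; the hardware-descriptor type and field names are hypothetical.

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical DMA descriptor understood by some HBA firmware. */
struct demo_sg_entry {
	u64 addr;
	u32 len;
};

static int demo_fill_sg_table(struct scsi_cmnd *cmd,
			      struct demo_sg_entry *table)
{
	struct scatterlist *sg;
	int nseg, i;

	nseg = scsi_dma_map(cmd);	/* maps scsi_sglist(cmd) for DMA */
	if (nseg < 0)
		return nseg;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		table[i].addr = sg_dma_address(sg);
		table[i].len  = sg_dma_len(sg);
	}

	return nseg;
}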
201 static inline int scsi_bidi_cmnd(struct scsi_cmnd *cmd)
202 {
203 return blk_bidi_rq(cmd->request) &&
204 (cmd->request->next_rq->special != NULL);
205 }
206
207 static inline struct scsi_data_buffer *scsi_in(struct scsi_cmnd *cmd)
208 {
209 return scsi_bidi_cmnd(cmd) ?
210 cmd->request->next_rq->special : &cmd->sdb;
211 }
212
213 static inline struct scsi_data_buffer *scsi_out(struct scsi_cmnd *cmd)
214 {
215 return &cmd->sdb;
216 }
217
218 static inline int scsi_sg_copy_from_buffer(struct scsi_cmnd *cmd,
219 void *buf, int buflen)
220 {
221 return sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
222 buf, buflen);
223 }
224
225 static inline int scsi_sg_copy_to_buffer(struct scsi_cmnd *cmd,
226 void *buf, int buflen)
227 {
228 return sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
229 buf, buflen);
230 }
231
232 /*
233 * The operations below are hints that tell the controller driver how
234 * to handle I/Os with DIF or similar types of protection information.
235 */
236 enum scsi_prot_operations {
237 /* Normal I/O */
238 SCSI_PROT_NORMAL = 0,
239
240 /* OS-HBA: Protected, HBA-Target: Unprotected */
241 SCSI_PROT_READ_INSERT,
242 SCSI_PROT_WRITE_STRIP,
243
244 /* OS-HBA: Unprotected, HBA-Target: Protected */
245 SCSI_PROT_READ_STRIP,
246 SCSI_PROT_WRITE_INSERT,
247
248 /* OS-HBA: Protected, HBA-Target: Protected */
249 SCSI_PROT_READ_PASS,
250 SCSI_PROT_WRITE_PASS,
251 };
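Illustrative only: a controller driver would typically inspect the hint with scsi_get_prot_op() when building a command. The helper below is hypothetical and merely restates the enum comments above.

#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

/*
 * Hypothetical helper: does protection information travel between the
 * HBA and the target for this command?  It does not for SCSI_PROT_NORMAL,
 * SCSI_PROT_READ_INSERT and SCSI_PROT_WRITE_STRIP, where the HBA-Target
 * side is unprotected.
 */
static bool demo_wire_has_pi(struct scsi_cmnd *cmd)
{
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_NORMAL:
	case SCSI_PROT_READ_INSERT:	/* HBA inserts PI toward the OS       */
	case SCSI_PROT_WRITE_STRIP:	/* HBA strips PI before the target    */
		return false;
	default:			/* *_STRIP/_INSERT/_PASS on reads/writes */
		return true;
	}
}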
252
253 static inline void scsi_set_prot_op(struct scsi_cmnd *scmd, unsigned char op)
254 {
255 scmd->prot_op = op;
256 }
257
258 static inline unsigned char scsi_get_prot_op(struct scsi_cmnd *scmd)
259 {
260 return scmd->prot_op;
261 }
262
263 enum scsi_prot_flags {
264 SCSI_PROT_TRANSFER_PI = 1 << 0,
265 SCSI_PROT_GUARD_CHECK = 1 << 1,
266 SCSI_PROT_REF_CHECK = 1 << 2,
267 SCSI_PROT_REF_INCREMENT = 1 << 3,
268 SCSI_PROT_IP_CHECKSUM = 1 << 4,
269 };
270
271 /*
272 * The controller usually does not know anything about the target it
273 * is communicating with. However, when DIX is enabled the controller
274 * must know the target type so it can verify the protection
275 * information passed along with the I/O.
276 */
277 enum scsi_prot_target_type {
278 SCSI_PROT_DIF_TYPE0 = 0,
279 SCSI_PROT_DIF_TYPE1,
280 SCSI_PROT_DIF_TYPE2,
281 SCSI_PROT_DIF_TYPE3,
282 };
283
284 static inline void scsi_set_prot_type(struct scsi_cmnd *scmd, unsigned char type)
285 {
286 scmd->prot_type = type;
287 }
288
289 static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
290 {
291 return scmd->prot_type;
292 }
293
294 static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
295 {
296 return blk_rq_pos(scmd->request);
297 }
298
299 static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
300 {
301 return scmd->device->sector_size;
302 }
303
304 static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
305 {
306 return blk_rq_pos(scmd->request) >>
307 (ilog2(scsi_prot_interval(scmd)) - 9) & 0xffffffff;
308 }
309
310 static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
311 {
312 return cmd->prot_sdb ? cmd->prot_sdb->table.nents : 0;
313 }
314
315 static inline struct scatterlist *scsi_prot_sglist(struct scsi_cmnd *cmd)
316 {
317 return cmd->prot_sdb ? cmd->prot_sdb->table.sgl : NULL;
318 }
319
320 static inline struct scsi_data_buffer *scsi_prot(struct scsi_cmnd *cmd)
321 {
322 return cmd->prot_sdb;
323 }
324
325 #define scsi_for_each_prot_sg(cmd, sg, nseg, __i) \
326 for_each_sg(scsi_prot_sglist(cmd), sg, nseg, __i)
327
328 static inline void set_msg_byte(struct scsi_cmnd *cmd, char status)
329 {
330 cmd->result = (cmd->result & 0xffff00ff) | (status << 8);
331 }
332
333 static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
334 {
335 cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
336 }
337
338 static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
339 {
340 cmd->result = (cmd->result & 0x00ffffff) | (status << 24);
341 }
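As an illustration (not part of the header): a completion path would report a transport failure to the mid-layer by filling the host byte of cmd->result. DID_OK and DID_ERROR come from scsi/scsi.h; the function and its failure check are hypothetical.

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical completion path of a low-level driver. */
static void demo_complete(struct scsi_cmnd *cmd, bool transport_failed)
{
	if (transport_failed)
		set_host_byte(cmd, DID_ERROR);	/* bits 16..23 of cmd->result */
	else
		set_host_byte(cmd, DID_OK);

	cmd->scsi_done(cmd);			/* hand the command back to scsi-ml */
}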
342
343 static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
344 {
345 unsigned int xfer_len = scsi_out(scmd)->length;
346 unsigned int prot_interval = scsi_prot_interval(scmd);
347
348 if (scmd->prot_flags & SCSI_PROT_TRANSFER_PI)
349 xfer_len += (xfer_len >> ilog2(prot_interval)) * 8;
350
351 return xfer_len;
352 }
353
354 #endif /* _SCSI_SCSI_CMND_H */
Here is an explanation of a rule violation found while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether your driver actually contains an error.
The Error trace column contains the path along which the given rule is violated. You can expand or collapse entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand or collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking them opens the corresponding lines in the source code.
The Source code column contains the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers and the rule model. Tabs show the currently opened file and the other available files. Hovering over a tab shows the full file name; clicking a tab shows that file's content.
Kernel | Module | Rule | Verifier | Verdict | Status | Creation time | Problem description |
linux-4.11-rc1.tar.xz | drivers/scsi/mvumi.ko | 331_1a | CPAchecker | Bug | Fixed | 2017-04-22 02:24:00 | L0271 |
Comment
Reported: 22 Apr 2017
[Back to top]