Bug
Error # 156
Error trace
{ 20 typedef unsigned char __u8; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 29 typedef long long __s64; 30 typedef unsigned long long __u64; 15 typedef signed char s8; 16 typedef unsigned char u8; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 32 typedef __u16 __le16; 257 struct kernel_symbol { unsigned long value; const char *name; } ; 33 struct module ; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 102 typedef __s32 int32_t; 108 typedef __u32 uint32_t; 133 typedef unsigned long sector_t; 134 typedef unsigned long blkcnt_t; 152 typedef u64 dma_addr_t; 157 typedef unsigned int gfp_t; 158 typedef unsigned int fmode_t; 161 typedef u64 phys_addr_t; 166 typedef phys_addr_t resource_size_t; 176 struct __anonstruct_atomic_t_6 { int counter; } ; 176 typedef struct __anonstruct_atomic_t_6 atomic_t; 181 struct __anonstruct_atomic64_t_7 { long counter; } ; 181 typedef struct __anonstruct_atomic64_t_7 atomic64_t; 182 struct list_head { struct list_head *next; struct list_head *prev; } ; 187 struct hlist_node ; 187 struct hlist_head { struct hlist_node *first; } ; 191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 125 typedef void (*ctor_fn_t)(); 279 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ; 58 struct device ; 467 struct file_operations ; 479 struct completion ; 480 struct pt_regs ; 27 union __anonunion___u_9 { struct list_head *__val; char __c[1U]; } ; 189 union __anonunion___u_13 { struct list_head *__val; char __c[1U]; } ; 556 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 111 struct timespec ; 112 struct compat_timespec ; 113 struct __anonstruct_futex_25 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ; 113 struct __anonstruct_nanosleep_26 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ; 113 struct pollfd ; 113 struct __anonstruct_poll_27 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ; 113 union __anonunion____missing_field_name_24 { struct __anonstruct_futex_25 futex; struct __anonstruct_nanosleep_26 nanosleep; struct __anonstruct_poll_27 poll; } ; 113 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_24 __annonCompField4; } ; 39 struct page ; 26 struct task_struct ; 27 struct mm_struct 
; 288 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ; 66 struct __anonstruct____missing_field_name_30 { unsigned int a; unsigned int b; } ; 66 struct __anonstruct____missing_field_name_31 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 66 union __anonunion____missing_field_name_29 { struct __anonstruct____missing_field_name_30 __annonCompField5; struct __anonstruct____missing_field_name_31 __annonCompField6; } ; 66 struct desc_struct { union __anonunion____missing_field_name_29 __annonCompField7; } ; 13 typedef unsigned long pteval_t; 14 typedef unsigned long pmdval_t; 16 typedef unsigned long pgdval_t; 17 typedef unsigned long pgprotval_t; 19 struct __anonstruct_pte_t_32 { pteval_t pte; } ; 19 typedef struct __anonstruct_pte_t_32 pte_t; 21 struct pgprot { pgprotval_t pgprot; } ; 256 typedef struct pgprot pgprot_t; 258 struct __anonstruct_pgd_t_33 { pgdval_t pgd; } ; 258 typedef struct __anonstruct_pgd_t_33 pgd_t; 297 struct __anonstruct_pmd_t_35 { pmdval_t pmd; } ; 297 typedef struct __anonstruct_pmd_t_35 pmd_t; 423 typedef struct page *pgtable_t; 434 struct file ; 447 struct seq_file ; 483 struct thread_struct ; 485 struct cpumask ; 20 struct qspinlock { atomic_t val; } ; 33 typedef struct qspinlock arch_spinlock_t; 34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ; 14 typedef struct qrwlock arch_rwlock_t; 247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ; 341 struct cpumask { unsigned long bits[128U]; } ; 15 typedef struct cpumask cpumask_t; 654 typedef struct cpumask *cpumask_var_t; 23 typedef atomic64_t atomic_long_t; 81 struct static_key { atomic_t enabled; } ; 22 struct tracepoint_func { void *func; void *data; int prio; } ; 28 struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ; 254 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 26 struct __anonstruct____missing_field_name_59 { u64 rip; u64 rdp; } ; 26 struct __anonstruct____missing_field_name_60 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 26 union __anonunion____missing_field_name_58 { struct __anonstruct____missing_field_name_59 __annonCompField13; struct __anonstruct____missing_field_name_60 __annonCompField14; } ; 26 union __anonunion____missing_field_name_61 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_58 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_61 __annonCompField16; } ; 66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ; 227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ; 233 struct xregs_state { struct 
fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ; 254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ; 271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; union fpregs_state state; } ; 169 struct seq_operations ; 372 struct perf_event ; 377 struct __anonstruct_mm_segment_t_73 { unsigned long seg; } ; 377 typedef struct __anonstruct_mm_segment_t_73 mm_segment_t; 378 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ; 33 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 28 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ; 572 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct____missing_field_name_75 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion____missing_field_name_74 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_75 __annonCompField19; } ; 33 struct spinlock { union __anonunion____missing_field_name_74 __annonCompField20; } ; 76 typedef struct spinlock spinlock_t; 23 struct __anonstruct_rwlock_t_76 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 23 typedef struct __anonstruct_rwlock_t_76 rwlock_t; 416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ; 52 typedef struct seqcount seqcount_t; 407 struct __anonstruct_seqlock_t_91 { struct seqcount seqcount; spinlock_t lock; } ; 407 typedef struct __anonstruct_seqlock_t_91 seqlock_t; 601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 7 typedef __s64 time64_t; 83 struct user_namespace ; 22 struct __anonstruct_kuid_t_92 { uid_t val; } ; 22 typedef struct 
__anonstruct_kuid_t_92 kuid_t; 27 struct __anonstruct_kgid_t_93 { gid_t val; } ; 27 typedef struct __anonstruct_kgid_t_93 kgid_t; 139 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ; 36 struct vm_area_struct ; 38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 43 typedef struct __wait_queue_head wait_queue_head_t; 97 struct __anonstruct_nodemask_t_94 { unsigned long bits[16U]; } ; 97 typedef struct __anonstruct_nodemask_t_94 nodemask_t; 247 typedef unsigned int isolate_mode_t; 13 struct optimistic_spin_queue { atomic_t tail; } ; 39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ; 67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ; 177 struct rw_semaphore ; 178 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ; 178 struct completion { unsigned int done; wait_queue_head_t wait; } ; 446 union ktime { s64 tv64; } ; 41 typedef union ktime ktime_t; 1144 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ; 254 struct hrtimer ; 255 enum hrtimer_restart ; 256 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 41 struct rb_root { struct rb_node *rb_node; } ; 835 struct nsproxy ; 278 struct workqueue_struct ; 279 struct work_struct ; 54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 217 struct resource ; 64 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; unsigned long desc; struct resource *parent; struct resource *sibling; struct resource *child; } ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 335 struct wakeup_source ; 336 
struct wake_irq ; 337 struct pm_domain_data ; 338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ; 556 struct dev_pm_qos ; 556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ; 616 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ; 26 struct ldt_struct ; 26 struct vdso_image ; 26 struct __anonstruct_mm_context_t_165 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; } ; 26 typedef struct __anonstruct_mm_context_t_165 mm_context_t; 1276 struct llist_node ; 64 struct llist_node { struct llist_node *next; } ; 37 struct cred ; 19 struct inode ; 58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 73 struct __anonstruct____missing_field_name_211 { struct arch_uprobe_task autask; unsigned long vaddr; } ; 73 struct __anonstruct____missing_field_name_212 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 73 union __anonunion____missing_field_name_210 { struct __anonstruct____missing_field_name_211 __annonCompField35; struct __anonstruct____missing_field_name_212 __annonCompField36; } ; 73 struct uprobe ; 73 struct return_instance ; 73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_210 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 94 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ; 110 struct xol_area ; 111 struct uprobes_state { struct xol_area *xol_area; } ; 150 struct address_space ; 151 struct mem_cgroup ; 152 union __anonunion____missing_field_name_213 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ; 152 union __anonunion____missing_field_name_214 { unsigned long index; void *freelist; } ; 152 struct 
__anonstruct____missing_field_name_218 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 152 union __anonunion____missing_field_name_217 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_218 __annonCompField40; int units; } ; 152 struct __anonstruct____missing_field_name_216 { union __anonunion____missing_field_name_217 __annonCompField41; atomic_t _refcount; } ; 152 union __anonunion____missing_field_name_215 { unsigned long counters; struct __anonstruct____missing_field_name_216 __annonCompField42; } ; 152 struct dev_pagemap ; 152 struct __anonstruct____missing_field_name_220 { struct page *next; int pages; int pobjects; } ; 152 struct __anonstruct____missing_field_name_221 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ; 152 struct __anonstruct____missing_field_name_222 { unsigned long __pad; pgtable_t pmd_huge_pte; } ; 152 union __anonunion____missing_field_name_219 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_220 __annonCompField44; struct callback_head callback_head; struct __anonstruct____missing_field_name_221 __annonCompField45; struct __anonstruct____missing_field_name_222 __annonCompField46; } ; 152 struct kmem_cache ; 152 union __anonunion____missing_field_name_223 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ; 152 struct page { unsigned long flags; union __anonunion____missing_field_name_213 __annonCompField38; union __anonunion____missing_field_name_214 __annonCompField39; union __anonunion____missing_field_name_215 __annonCompField43; union __anonunion____missing_field_name_219 __annonCompField47; union __anonunion____missing_field_name_223 __annonCompField48; struct mem_cgroup *mem_cgroup; } ; 197 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 282 struct userfaultfd_ctx ; 282 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ; 289 struct __anonstruct_shared_224 { struct rb_node rb; unsigned long rb_subtree_last; } ; 289 struct anon_vma ; 289 struct vm_operations_struct ; 289 struct mempolicy ; 289 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_224 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ; 362 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 367 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 381 struct task_rss_stat { int events; int count[4U]; } ; 389 struct mm_rss_stat { atomic_long_t count[4U]; } ; 394 struct kioctx_table ; 395 struct linux_binfmt ; 395 struct mmu_notifier_mm ; 395 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct 
rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ; 565 struct vm_fault ; 619 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 18 typedef __u64 Elf64_Off; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ; 235 typedef struct elf64_hdr Elf64_Ehdr; 314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ; 326 typedef struct elf64_shdr Elf64_Shdr; 53 union __anonunion____missing_field_name_229 { unsigned long bitmap[4U]; struct callback_head callback_head; } ; 53 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_229 __annonCompField49; } ; 41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ; 124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ; 167 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ; 199 struct dentry ; 200 struct iattr ; 201 struct super_block ; 202 struct file_system_type ; 203 struct kernfs_open_node ; 204 struct kernfs_iattrs ; 227 struct kernfs_root ; 227 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 85 struct kernfs_node ; 85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 89 struct kernfs_ops ; 89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct 
kernfs_node *notify_next; } ; 96 union __anonunion____missing_field_name_234 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_234 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ; 157 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 173 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ; 191 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 499 struct sock ; 500 struct kobject ; 501 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 507 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 59 struct bin_attribute ; 60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 530 struct kref { atomic_t refcount; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; 
unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 223 struct kernel_param ; 228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 62 struct kparam_string ; 62 struct kparam_array ; 62 union __anonunion____missing_field_name_237 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_237 __annonCompField51; } ; 83 struct kparam_string { unsigned int maxlen; char *string; } ; 89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ; 470 struct exception_table_entry ; 24 struct latch_tree_node { struct rb_node node[2U]; } ; 211 struct mod_arch_specific { } ; 39 struct module_param_attrs ; 39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 50 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 277 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 284 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ; 291 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ; 307 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ; 321 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ; 329 struct module_sect_attrs ; 329 struct module_notes_attrs ; 329 struct trace_event_call ; 329 struct trace_enum_map ; 329 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; 
unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 799 struct klist_node ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 93 struct hlist_bl_node ; 93 struct hlist_bl_head { struct hlist_bl_node *first; } ; 36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ; 114 struct __anonstruct____missing_field_name_287 { spinlock_t lock; int count; } ; 114 union __anonunion____missing_field_name_286 { struct __anonstruct____missing_field_name_287 __annonCompField52; } ; 114 struct lockref { union __anonunion____missing_field_name_286 __annonCompField53; } ; 77 struct path ; 78 struct vfsmount ; 79 struct __anonstruct____missing_field_name_289 { u32 hash; u32 len; } ; 79 union __anonunion____missing_field_name_288 { struct __anonstruct____missing_field_name_289 __annonCompField54; u64 hash_len; } ; 79 struct qstr { union __anonunion____missing_field_name_288 __annonCompField55; const unsigned char *name; } ; 65 struct dentry_operations ; 65 union __anonunion____missing_field_name_290 { struct list_head d_lru; wait_queue_head_t *d_wait; } ; 65 union __anonunion_d_u_291 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ; 65 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_290 __annonCompField56; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_291 d_u; } ; 121 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); struct dentry * (*d_real)(struct dentry *, 
const struct inode *, unsigned int); } ; 591 struct path { struct vfsmount *mnt; struct dentry *dentry; } ; 19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ; 27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ; 80 struct list_lru_one { struct list_head list; long nr_items; } ; 32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ; 37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ; 47 struct list_lru { struct list_lru_node *node; struct list_head list; } ; 63 struct __anonstruct____missing_field_name_293 { struct radix_tree_node *parent; void *private_data; } ; 63 union __anonunion____missing_field_name_292 { struct __anonstruct____missing_field_name_293 __annonCompField57; struct callback_head callback_head; } ; 63 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned int count; union __anonunion____missing_field_name_292 __annonCompField58; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ; 106 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 523 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; 530 struct pid_namespace ; 530 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ; 38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; 44 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ; 50 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ; 65 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *fast_read_ctr; struct rw_semaphore rw_sem; atomic_t slow_read_ctr; wait_queue_head_t write_waitq; } ; 87 struct block_device ; 88 struct io_context ; 89 struct cgroup_subsys_state ; 266 struct delayed_call { void (*fn)(void *); void *arg; } ; 261 struct backing_dev_info ; 262 struct bdi_writeback ; 263 struct export_operations ; 266 struct kiocb ; 267 struct pipe_inode_info ; 268 struct poll_table_struct ; 269 struct kstatfs ; 270 struct swap_info_struct ; 271 struct iov_iter ; 272 struct fscrypt_info ; 273 struct fscrypt_operations ; 76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ; 213 struct dquot ; 214 struct kqid ; 19 typedef __kernel_uid32_t projid_t; 23 struct __anonstruct_kprojid_t_302 { projid_t val; } ; 23 typedef struct __anonstruct_kprojid_t_302 kprojid_t; 181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; 66 typedef long long qsize_t; 67 union __anonunion____missing_field_name_303 { kuid_t uid; kgid_t gid; kprojid_t projid; } ; 67 
struct kqid { union __anonunion____missing_field_name_303 __annonCompField60; enum quota_type type; } ; 194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ; 216 struct quota_format_type ; 217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ; 282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ; 309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ; 361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ; 407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ; 418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ; 431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ; 447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops 
*qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ; 511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ; 541 struct writeback_control ; 542 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ; 367 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ; 426 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; struct list_head private_list; void *private_data; } ; 447 struct request_queue ; 448 struct hd_struct ; 448 struct gendisk ; 448 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ; 563 struct posix_acl ; 589 struct inode_operations ; 589 union __anonunion____missing_field_name_308 { const unsigned int i_nlink; unsigned int __i_nlink; } ; 589 union __anonunion____missing_field_name_309 { struct hlist_head i_dentry; struct callback_head i_rcu; } ; 589 struct file_lock_context ; 589 struct cdev ; 589 union __anonunion____missing_field_name_310 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ; 589 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct 
super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_308 __annonCompField61; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_309 __annonCompField62; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_310 __annonCompField63; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ; 843 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ; 851 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ; 874 union __anonunion_f_u_311 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ; 874 struct file { union __anonunion_f_u_311 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ; 959 typedef void *fl_owner_t; 960 struct file_lock ; 961 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ; 967 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ; 994 struct nlm_lockowner ; 995 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ; 14 struct nfs4_lock_state ; 15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ; 19 struct fasync_struct ; 19 struct __anonstruct_afs_313 { struct list_head link; int state; } ; 19 union __anonunion_fl_u_312 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_313 afs; } ; 19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long 
fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_312 fl_u; } ; 1047 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ; 1102 struct files_struct ; 1255 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ; 1290 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ; 1320 struct super_operations ; 1320 struct xattr_handler ; 1320 struct mtd_info ; 1320 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ; 1603 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ; 1616 struct dir_context ; 1641 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ; 1648 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); 
ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ; 1717 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, struct inode *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, struct inode *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ; 1774 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, 
char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ; 2018 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ; 3193 struct assoc_array_ptr ; 3193 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ; 31 typedef int32_t key_serial_t; 34 typedef uint32_t key_perm_t; 35 struct key ; 36 struct user_struct ; 37 struct signal_struct ; 38 struct key_type ; 42 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ; 91 union key_payload { void *rcu_data0; void *data[4U]; } ; 128 union __anonunion____missing_field_name_314 { struct list_head graveyard_link; struct rb_node serial_node; } ; 128 struct key_user ; 128 union __anonunion____missing_field_name_315 { time_t expiry; time_t revoked_at; } ; 128 struct __anonstruct____missing_field_name_317 { struct key_type *type; char *description; } ; 128 union __anonunion____missing_field_name_316 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_317 __annonCompField66; } ; 128 struct __anonstruct____missing_field_name_319 { struct list_head name_link; struct assoc_array keys; } ; 128 union __anonunion____missing_field_name_318 { union key_payload payload; struct __anonstruct____missing_field_name_319 __annonCompField68; int reject_error; } ; 128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_314 __annonCompField64; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_315 __annonCompField65; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_316 __annonCompField67; union __anonunion____missing_field_name_318 __annonCompField69; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ; 377 struct audit_context ; 27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ; 90 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ; 377 struct seq_file { char *buf; size_t size; size_t from; size_t count; 
size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ; 30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ; 222 struct pinctrl ; 223 struct pinctrl_state ; 194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ; 84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ; 4 typedef unsigned long cputime_t; 25 struct sem_undo_list ; 25 struct sysv_sem { struct sem_undo_list *undo_list; } ; 26 struct sysv_shm { struct list_head shm_clist; } ; 24 struct __anonstruct_sigset_t_320 { unsigned long sig[1U]; } ; 24 typedef struct __anonstruct_sigset_t_320 sigset_t; 25 struct siginfo ; 17 typedef void __signalfn_t(int); 18 typedef __signalfn_t *__sighandler_t; 20 typedef void __restorefn_t(); 21 typedef __restorefn_t *__sigrestore_t; 34 union sigval { int sival_int; void *sival_ptr; } ; 10 typedef union sigval sigval_t; 11 struct __anonstruct__kill_322 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ; 11 struct __anonstruct__timer_323 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ; 11 struct __anonstruct__rt_324 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ; 11 struct __anonstruct__sigchld_325 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ; 11 struct __anonstruct__addr_bnd_328 { void *_lower; void *_upper; } ; 11 union __anonunion____missing_field_name_327 { struct __anonstruct__addr_bnd_328 _addr_bnd; __u32 _pkey; } ; 11 struct __anonstruct__sigfault_326 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_327 __annonCompField70; } ; 11 struct __anonstruct__sigpoll_329 { long _band; int _fd; } ; 11 struct __anonstruct__sigsys_330 { void *_call_addr; int _syscall; unsigned int _arch; } ; 11 union __anonunion__sifields_321 { int _pad[28U]; struct __anonstruct__kill_322 _kill; struct __anonstruct__timer_323 _timer; struct __anonstruct__rt_324 _rt; struct __anonstruct__sigchld_325 _sigchld; struct __anonstruct__sigfault_326 _sigfault; struct __anonstruct__sigpoll_329 _sigpoll; struct __anonstruct__sigsys_330 _sigsys; } ; 11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_321 _sifields; } ; 118 typedef struct siginfo siginfo_t; 22 struct sigpending { struct list_head list; sigset_t signal; } ; 257 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ; 271 struct k_sigaction { struct sigaction sa; } ; 43 struct seccomp_filter ; 44 struct seccomp { int mode; struct seccomp_filter *filter; } ; 40 struct rt_mutex_waiter ; 41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ; 11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base 
*base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; } ; 125 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ; 158 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ; 17 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ; 45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ; 41 struct percpu_ref ; 55 typedef void percpu_ref_func_t(struct percpu_ref *); 68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ; 325 struct cgroup ; 326 struct cgroup_root ; 327 struct cgroup_subsys ; 328 struct cgroup_taskset ; 372 struct cgroup_file { struct kernfs_node *kn; } ; 90 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ; 141 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; } ; 221 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; int ancestor_ids[]; } ; 306 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ; 345 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * 
(*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ; 430 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ; 128 struct futex_pi_state ; 129 struct robust_list_head ; 130 struct bio_list ; 131 struct fs_struct ; 132 struct perf_event_context ; 133 struct blk_plug ; 134 struct nameidata ; 188 struct cfs_rq ; 189 struct task_group ; 493 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ; 536 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ; 544 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ; 551 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ; 576 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ; 592 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ; 614 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ; 659 struct autogroup ; 660 struct tty_struct ; 660 struct taskstats ; 660 struct tty_audit_buf ; 660 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; atomic_t oom_victims; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; 
unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ; 835 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ; 880 struct reclaim_state ; 881 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ; 896 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ; 953 struct wake_q_node { struct wake_q_node *next; } ; 1220 struct load_weight { unsigned long weight; u32 inv_weight; } ; 1228 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ; 1286 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ; 1321 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ; 1358 struct rt_rq ; 1358 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ; 1376 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ; 1440 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ; 1459 struct sched_class ; 1459 struct compat_robust_list_head ; 1459 struct numa_group ; 1459 struct kcov ; 1459 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int 
wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state 
*reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; struct thread_struct thread; } ; 76 struct dma_map_ops ; 76 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ; 21 struct pdev_archdata { } ; 24 struct device_private ; 25 struct device_driver ; 26 struct driver_private ; 27 struct class ; 28 struct subsys_private ; 29 struct bus_type ; 30 struct device_node ; 31 struct fwnode_handle ; 32 struct iommu_ops ; 33 struct iommu_group ; 61 struct device_attribute ; 61 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ; 142 struct device_type ; 201 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; 207 struct of_device_id ; 207 struct acpi_device_id ; 207 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct 
of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ; 357 struct class_attribute ; 357 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ; 450 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ; 518 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ; 546 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ; 699 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ; 708 struct irq_domain ; 708 struct dma_coherent_mem ; 708 struct cma ; 708 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ; 862 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ; 1327 struct scatterlist ; 89 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; 273 struct vm_fault { unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; void *virtual_address; struct page *cow_page; struct page *page; void *entry; } 
; 308 struct fault_env { struct vm_area_struct *vma; unsigned long address; unsigned int flags; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ; 335 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct fault_env *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ; 2451 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ; 21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ; 158 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ; 19 struct dma_pool ; 640 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; 16 typedef enum irqreturn irqreturn_t; 63 struct exception_table_entry { int insn; int fixup; int handler; } ; 708 struct usb_ctrlrequest { __u8 bRequestType; __u8 bRequest; __le16 wValue; __le16 wIndex; __le16 wLength; } ; 388 struct usb_endpoint_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bEndpointAddress; __u8 bmAttributes; __le16 wMaxPacketSize; __u8 bInterval; __u8 bRefresh; __u8 bSynchAddress; } ; 650 struct usb_ss_ep_comp_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bMaxBurst; __u8 bmAttributes; __le16 wBytesPerInterval; } ; 1102 enum usb_device_speed { USB_SPEED_UNKNOWN = 0, USB_SPEED_LOW = 1, USB_SPEED_FULL = 2, USB_SPEED_HIGH = 3, USB_SPEED_WIRELESS = 4, USB_SPEED_SUPER = 5, USB_SPEED_SUPER_PLUS = 6 } ; 1112 enum usb_device_state { USB_STATE_NOTATTACHED = 0, USB_STATE_ATTACHED = 1, USB_STATE_POWERED = 2, 
USB_STATE_RECONNECTING = 3, USB_STATE_UNAUTHENTICATED = 4, USB_STATE_DEFAULT = 5, USB_STATE_ADDRESS = 6, USB_STATE_CONFIGURED = 7, USB_STATE_SUSPENDED = 8 } ; 63 struct usb_ep ; 64 struct usb_request { void *buf; unsigned int length; dma_addr_t dma; struct scatterlist *sg; unsigned int num_sgs; unsigned int num_mapped_sgs; unsigned short stream_id; unsigned char no_interrupt; unsigned char zero; unsigned char short_not_ok; void (*complete)(struct usb_ep *, struct usb_request *); void *context; struct list_head list; int status; unsigned int actual; } ; 115 struct usb_ep_ops { int (*enable)(struct usb_ep *, const struct usb_endpoint_descriptor *); int (*disable)(struct usb_ep *); struct usb_request * (*alloc_request)(struct usb_ep *, gfp_t ); void (*free_request)(struct usb_ep *, struct usb_request *); int (*queue)(struct usb_ep *, struct usb_request *, gfp_t ); int (*dequeue)(struct usb_ep *, struct usb_request *); int (*set_halt)(struct usb_ep *, int); int (*set_wedge)(struct usb_ep *); int (*fifo_status)(struct usb_ep *); void (*fifo_flush)(struct usb_ep *); } ; 144 struct usb_ep_caps { unsigned char type_control; unsigned char type_iso; unsigned char type_bulk; unsigned char type_int; unsigned char dir_in; unsigned char dir_out; } ; 162 struct usb_ep { void *driver_data; const char *name; const struct usb_ep_ops *ops; struct list_head ep_list; struct usb_ep_caps caps; bool claimed; bool enabled; unsigned short maxpacket; unsigned short maxpacket_limit; unsigned short max_streams; unsigned char mult; unsigned char maxburst; u8 address; const struct usb_endpoint_descriptor *desc; const struct usb_ss_ep_comp_descriptor *comp_desc; } ; 246 struct usb_dcd_config_params { __u8 bU1devExitLat; __le16 bU2DevExitLat; } ; 284 struct usb_gadget ; 285 struct usb_gadget_driver ; 286 struct usb_udc ; 287 struct usb_gadget_ops { int (*get_frame)(struct usb_gadget *); int (*wakeup)(struct usb_gadget *); int (*set_selfpowered)(struct usb_gadget *, int); int (*vbus_session)(struct usb_gadget *, int); int (*vbus_draw)(struct usb_gadget *, unsigned int); int (*pullup)(struct usb_gadget *, int); int (*ioctl)(struct usb_gadget *, unsigned int, unsigned long); void (*get_config_params)(struct usb_dcd_config_params *); int (*udc_start)(struct usb_gadget *, struct usb_gadget_driver *); int (*udc_stop)(struct usb_gadget *); struct usb_ep * (*match_ep)(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); } ; 309 struct usb_otg_caps ; 309 struct usb_gadget { struct work_struct work; struct usb_udc *udc; const struct usb_gadget_ops *ops; struct usb_ep *ep0; struct list_head ep_list; enum usb_device_speed speed; enum usb_device_speed max_speed; enum usb_device_state state; const char *name; struct device dev; unsigned int out_epnum; unsigned int in_epnum; unsigned int mA; struct usb_otg_caps *otg_caps; unsigned char sg_supported; unsigned char is_otg; unsigned char is_a_peripheral; unsigned char b_hnp_enable; unsigned char a_hnp_support; unsigned char a_alt_hnp_support; unsigned char hnp_polling_support; unsigned char host_request_flag; unsigned char quirk_ep_out_aligned_size; unsigned char quirk_altset_not_supp; unsigned char quirk_stall_not_supp; unsigned char quirk_zlp_not_supp; unsigned char is_selfpowered; unsigned char deactivated; unsigned char connected; } ; 523 struct usb_gadget_driver { char *function; enum usb_device_speed max_speed; int (*bind)(struct usb_gadget *, struct usb_gadget_driver *); void (*unbind)(struct usb_gadget *); int (*setup)(struct usb_gadget *, 
const struct usb_ctrlrequest *); void (*disconnect)(struct usb_gadget *); void (*suspend)(struct usb_gadget *); void (*resume)(struct usb_gadget *); void (*reset)(struct usb_gadget *); struct device_driver driver; char *udc_name; struct list_head pending; unsigned char match_existing_only; } ; 13 typedef unsigned long kernel_ulong_t; 186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ; 229 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 484 struct platform_device_id { char name[20U]; kernel_ulong_t driver_data; } ; 674 struct mfd_cell ; 676 struct platform_device { const char *name; int id; bool id_auto; struct device dev; u32 num_resources; struct resource *resource; const struct platform_device_id *id_entry; char *driver_override; struct mfd_cell *mfd_cell; struct pdev_archdata archdata; } ; 370 struct mv_usb_addon_irq { unsigned int irq; int (*poll)(); } ; 35 struct mv_usb_platform_data { struct mv_usb_addon_irq *id; struct mv_usb_addon_irq *vbus; unsigned int mode; unsigned char disable_otg_clock_gating; unsigned char otg_force_a_bus_req; int (*phy_init)(void *); void (*phy_deinit)(void *); int (*set_vbus)(unsigned int); int (*private_init)(void *, void *); } ; 53 struct clk ; 511 struct mv_u3d_cap_regs { u32 rsvd[5U]; u32 dboff; u32 rtsoff; u32 vuoff; } ; 130 struct mv_u3d_op_regs { u32 usbcmd; u32 rsvd1[11U]; u32 dcbaapl; u32 dcbaaph; u32 rsvd2[243U]; u32 portsc; u32 portlinkinfo; u32 rsvd3[9917U]; u32 doorbell; } ; 143 struct epxcr { u32 epxoutcr0; u32 epxoutcr1; u32 epxincr0; u32 epxincr1; } ; 151 struct xferstatus { u32 curdeqlo; u32 curdeqhi; u32 statuslo; u32 statushi; } ; 159 struct mv_u3d_vuc_regs { u32 ctrlepenable; u32 setuplock; u32 endcomplete; u32 intrcause; u32 intrenable; u32 trbcomplete; u32 linkchange; u32 rsvd1[5U]; u32 trbunderrun; u32 rsvd2[43U]; u32 bridgesetting; u32 rsvd3[7U]; struct xferstatus txst[16U]; struct xferstatus rxst[16U]; u32 ltssm; u32 pipe; u32 linkcr0; u32 linkcr1; u32 rsvd6[60U]; u32 mib0; u32 usblink; u32 ltssmstate; u32 linkerrorcause; u32 rsvd7[60U]; u32 devaddrtiebrkr; u32 itpinfo0; u32 itpinfo1; u32 rsvd8[61U]; struct epxcr epcr[16U]; u32 rsvd9[64U]; u32 phyaddr; u32 phydata; } ; 195 struct mv_u3d_ep_context { u32 rsvd0; u32 rsvd1; u32 trb_addr_lo; u32 trb_addr_hi; u32 rsvd2; u32 rsvd3; struct usb_ctrlrequest setup_buffer; } ; 206 struct mv_u3d_trb_ctrl { unsigned char own; unsigned char rsvd1; unsigned char chain; unsigned char ioc; unsigned char rsvd2; unsigned char type; unsigned char dir; unsigned short rsvd3; } ; 223 struct mv_u3d_trb_hw { u32 buf_addr_lo; u32 buf_addr_hi; u32 trb_len; struct mv_u3d_trb_ctrl ctrl; } ; 233 struct mv_u3d_trb { struct mv_u3d_trb_hw *trb_hw; dma_addr_t trb_dma; struct list_head trb_list; } ; 240 struct mv_u3d_ep ; 240 struct mv_u3d_req ; 240 struct mv_u3d { struct usb_gadget gadget; struct usb_gadget_driver *driver; spinlock_t lock; struct completion *done; struct device *dev; int irq; struct mv_u3d_cap_regs *cap_regs; struct mv_u3d_op_regs *op_regs; struct mv_u3d_vuc_regs *vuc_regs; void *phy_regs; unsigned int max_eps; struct mv_u3d_ep_context *ep_context; size_t ep_context_size; dma_addr_t ep_context_dma; struct dma_pool *trb_pool; struct mv_u3d_ep *eps; struct mv_u3d_req *status_req; struct usb_ctrlrequest local_setup_buff; unsigned int resume_state; unsigned int usb_state; unsigned int ep0_state; unsigned int ep0_dir; unsigned int dev_addr; unsigned int errors; unsigned char softconnect; unsigned char 
vbus_active; unsigned char remote_wakeup; unsigned char clock_gating; unsigned char active; unsigned char vbus_valid_detect; struct mv_usb_addon_irq *vbus; unsigned int power; struct clk *clk; } ; 288 struct mv_u3d_ep { struct usb_ep ep; struct mv_u3d *u3d; struct list_head queue; struct list_head req_list; struct mv_u3d_ep_context *ep_context; u32 direction; char name[14U]; u32 processing; spinlock_t req_lock; unsigned char wedge; unsigned char enabled; unsigned char ep_type; unsigned char ep_num; } ; 306 struct mv_u3d_req { struct usb_request req; struct mv_u3d_ep *ep; struct list_head queue; struct list_head list; struct list_head trb_list; struct mv_u3d_trb *trb_head; unsigned int trb_count; unsigned int chain; } ; 1 void * __builtin_memcpy(void *, const void *, unsigned long); 1 long int __builtin_expect(long, long); 218 void __read_once_size(const volatile void *p, void *res, int size); 243 void __write_once_size(volatile void *p, void *res, int size); 63 void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...); 414 int snprintf(char *, size_t , const char *, ...); 3 bool ldv_is_err(const void *ptr); 6 long int ldv_ptr_err(const void *ptr); 8 void ldv_dma_map_page(); 25 void INIT_LIST_HEAD(struct list_head *list); 48 void __list_add(struct list_head *, struct list_head *, struct list_head *); 75 void list_add_tail(struct list_head *new, struct list_head *head); 112 void __list_del_entry(struct list_head *); 143 void list_del_init(struct list_head *entry); 187 int list_empty(const struct list_head *head); 71 void warn_slowpath_null(const char *, const int); 23 unsigned long int __phys_addr(unsigned long); 31 void * __memcpy(void *, const void *, size_t ); 24 char * strncpy(char *, const char *, __kernel_size_t ); 32 long int PTR_ERR(const void *ptr); 41 bool IS_ERR(const void *ptr); 93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *); 22 void _raw_spin_lock(raw_spinlock_t *); 34 unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *); 41 void _raw_spin_unlock(raw_spinlock_t *); 45 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long); 289 raw_spinlock_t * spinlock_check(spinlock_t *lock); 300 void spin_lock(spinlock_t *lock); 345 void spin_unlock(spinlock_t *lock); 360 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags); 193 resource_size_t resource_size(const struct resource *res); 118 phys_addr_t virt_to_phys(volatile void *address); 181 void * ioremap_nocache(resource_size_t , unsigned long); 192 void * ioremap(resource_size_t offset, unsigned long size); 197 void iounmap(volatile void *); 31 unsigned int ioread32(void *); 41 void iowrite32(u32 , void *); 912 void * dev_get_drvdata(const struct device *dev); 917 void dev_set_drvdata(struct device *dev, void *data); 1046 void * dev_get_platdata(const struct device *dev); 1135 void dev_err(const struct device *, const char *, ...); 37 void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool ); 44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool ); 53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *); 131 void kmemcheck_mark_initialized(void *address, unsigned int n); 125 int valid_dma_direction(int dma_direction); 28 extern struct dma_map_ops *dma_ops; 30 struct dma_map_ops * get_dma_ops(struct device *dev); 42 bool arch_dma_alloc_attrs(struct device **, gfp_t *); 169 dma_addr_t ldv_dma_map_single_attrs_5(struct device *dev, void *ptr, size_t size, enum 
dma_data_direction dir, unsigned long attrs); 169 dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs); 192 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs); 404 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs); 445 void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); 451 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle); 19 struct dma_pool * dma_pool_create(const char *, struct device *, size_t , size_t , size_t ); 22 void dma_pool_destroy(struct dma_pool *); 24 void * dma_pool_alloc(struct dma_pool *, gfp_t , dma_addr_t *); 33 void dma_pool_free(struct dma_pool *, void *, dma_addr_t ); 10 void __const_udelay(unsigned long); 154 void kfree(const void *); 318 void * __kmalloc(size_t , gfp_t ); 466 void * kmalloc(size_t size, gfp_t flags); 564 void * kmalloc_array(size_t n, size_t size, gfp_t flags); 579 void * kcalloc(size_t n, size_t size, gfp_t flags); 622 void * kzalloc(size_t size, gfp_t flags); 139 int request_threaded_irq(unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *); 144 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *), unsigned long flags, const char *name, void *dev); 158 void free_irq(unsigned int, void *); 234 void usb_ep_set_maxpacket_limit(struct usb_ep *, unsigned int); 685 int usb_add_gadget_udc(struct device *, struct usb_gadget *); 686 void usb_del_gadget_udc(struct usb_gadget *); 770 int usb_gadget_map_request(struct usb_gadget *, struct usb_request *, int); 775 void usb_gadget_unmap_request(struct usb_gadget *, struct usb_request *, int); 795 void usb_gadget_giveback_request(struct usb_ep *, struct usb_request *); 52 struct resource * platform_get_resource(struct platform_device *, unsigned int, unsigned int); 56 struct resource * platform_get_resource_byname(struct platform_device *, unsigned int, const char *); 211 void * platform_get_drvdata(const struct platform_device *pdev); 216 void platform_set_drvdata(struct platform_device *pdev, void *data); 229 struct clk * clk_get(struct device *, const char *); 261 int clk_enable(struct clk *); 277 void clk_disable(struct clk *); 296 void clk_put(struct clk *); 38 const char driver_name[7U] = { 'm', 'v', '_', 'u', '3', 'd', '\x0' }; 41 void mv_u3d_nuke(struct mv_u3d_ep *ep, int status); 42 void mv_u3d_stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver); 46 const struct usb_endpoint_descriptor mv_u3d_ep0_desc = { 7U, 5U, 0U, 0U, 512U, 0U, 0U, 0U }; 54 void mv_u3d_ep0_reset(struct mv_u3d *u3d); 100 void mv_u3d_ep0_stall(struct mv_u3d *u3d); 119 int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index, struct mv_u3d_req *curr_req); 179 void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status); 223 int mv_u3d_queue_trb(struct mv_u3d_ep *ep, struct mv_u3d_req *req); 272 struct mv_u3d_trb * mv_u3d_build_trb_one(struct mv_u3d_req *req, unsigned int *length, dma_addr_t *dma); 337 int mv_u3d_build_trb_chain(struct mv_u3d_req *req, unsigned int *length, struct mv_u3d_trb *trb, int *is_last); 404 int mv_u3d_req_to_trb(struct mv_u3d_req *req); 474 int mv_u3d_start_queue(struct mv_u3d_ep *ep); 517 int mv_u3d_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc); 620 int mv_u3d_ep_disable(struct usb_ep 
*_ep); 663 struct usb_request * mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags); 676 void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req); 683 void mv_u3d_ep_fifo_flush(struct usb_ep *_ep); 767 int mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags); 841 int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req); 915 void mv_u3d_ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall); 938 int mv_u3d_ep_set_halt_wedge(struct usb_ep *_ep, int halt___0, int wedge); 981 int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt___0); 986 int mv_u3d_ep_set_wedge(struct usb_ep *_ep); 991 struct usb_ep_ops mv_u3d_ep_ops = { &mv_u3d_ep_enable, &mv_u3d_ep_disable, &mv_u3d_alloc_request, &mv_u3d_free_request, &mv_u3d_ep_queue, &mv_u3d_ep_dequeue, &mv_u3d_ep_set_halt, &mv_u3d_ep_set_wedge, 0, &mv_u3d_ep_fifo_flush }; 1006 void mv_u3d_controller_stop(struct mv_u3d *u3d); 1029 void mv_u3d_controller_start(struct mv_u3d *u3d); 1055 int mv_u3d_controller_reset(struct mv_u3d *u3d); 1087 int mv_u3d_enable(struct mv_u3d *u3d); 1116 void mv_u3d_disable(struct mv_u3d *u3d); 1128 int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active); 1179 int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned int mA); 1188 int mv_u3d_pullup(struct usb_gadget *gadget, int is_on); 1222 int mv_u3d_start(struct usb_gadget *g, struct usb_gadget_driver *driver); 1253 int mv_u3d_stop(struct usb_gadget *g); 1285 const struct usb_gadget_ops mv_u3d_ops = { 0, 0, 0, &mv_u3d_vbus_session, &mv_u3d_vbus_draw, &mv_u3d_pullup, 0, 0, &mv_u3d_start, &mv_u3d_stop, 0 }; 1297 int mv_u3d_eps_init(struct mv_u3d *u3d); 1391 void mv_u3d_irq_process_error(struct mv_u3d *u3d); 1398 void mv_u3d_irq_process_link_change(struct mv_u3d *u3d); 1462 void mv_u3d_ch9setaddress(struct mv_u3d *u3d, struct usb_ctrlrequest *setup); 1499 int mv_u3d_is_set_configuration(struct usb_ctrlrequest *setup); 1508 void mv_u3d_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num, struct usb_ctrlrequest *setup); 1578 void mv_u3d_get_setup_data(struct mv_u3d *u3d, u8 ep_num, u8 *buffer_ptr); 1588 void mv_u3d_irq_process_setup(struct mv_u3d *u3d); 1607 void mv_u3d_irq_process_tr_complete(struct mv_u3d *u3d); 1671 irqreturn_t mv_u3d_irq(int irq, void *dev); 1745 int mv_u3d_remove(struct platform_device *dev); 1778 int mv_u3d_probe(struct platform_device *dev); 2032 void mv_u3d_shutdown(struct platform_device *dev); 2074 void ldv_check_final_state(); 2077 void ldv_check_return_value(int); 2080 void ldv_check_return_value_probe(int); 2083 void ldv_initialize(); 2086 void ldv_handler_precall(); 2089 int nondet_int(); 2092 int LDV_IN_INTERRUPT = 0; 2095 void ldv_main0_sequence_infinite_withcheck_stateful(); 10 void ldv_error(); 14 void * ldv_err_ptr(long error); 28 bool ldv_is_err_or_null(const void *ptr); 5 int LDV_DMA_MAP_CALLS = 0; 16 void ldv_dma_mapping_error(); return ; } { 2097 struct usb_ep *var_group1; 2098 const struct usb_endpoint_descriptor *var_mv_u3d_ep_enable_8_p1; 2099 unsigned int var_mv_u3d_alloc_request_10_p1; 2100 struct usb_request *var_group2; 2101 unsigned int var_mv_u3d_ep_queue_13_p2; 2102 int var_mv_u3d_ep_set_halt_17_p1; 2103 struct usb_gadget *var_group3; 2104 int var_mv_u3d_vbus_session_24_p1; 2105 unsigned int var_mv_u3d_vbus_draw_25_p1; 2106 int var_mv_u3d_pullup_26_p1; 2107 struct usb_gadget_driver *var_group4; 2108 struct platform_device *var_group5; 2109 int res_mv_u3d_probe_41; 2110 int var_mv_u3d_irq_39_p0; 2111 void *var_mv_u3d_irq_39_p1; 2112 int 
ldv_s_mv_u3d_driver_platform_driver; 2113 int tmp; 2114 int tmp___0; 2301 ldv_s_mv_u3d_driver_platform_driver = 0; 2287 LDV_IN_INTERRUPT = 1; 2296 ldv_initialize() { /* Function call is skipped due to function is undefined */} 2306 goto ldv_34620; 2306 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 2309 goto ldv_34619; 2307 ldv_34619:; 2310 tmp = nondet_int() { /* Function call is skipped due to function is undefined */} 2310 switch (tmp); 2707 LDV_IN_INTERRUPT = 2; 2714 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { } 1673 struct mv_u3d *u3d; 1674 unsigned int status; 1675 unsigned int intr; 1676 unsigned int bridgesetting; 1677 unsigned int trbunderrun; 1678 struct _ddebug descriptor; 1679 long tmp; 1673 u3d = (struct mv_u3d *)dev; { 302 _raw_spin_lock(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */} 303 return ;; } 1680 status = ioread32((void *)(&(u3d->vuc_regs->intrcause))) { /* Function call is skipped due to function is undefined */} 1681 intr = ioread32((void *)(&(u3d->vuc_regs->intrenable))) { /* Function call is skipped due to function is undefined */} 1682 status = status & intr; 1691 bridgesetting = ioread32((void *)(&(u3d->vuc_regs->bridgesetting))) { /* Function call is skipped due to function is undefined */} 1694 bridgesetting = 65536U; 1695 iowrite32(bridgesetting, (void *)(&(u3d->vuc_regs->bridgesetting))) { /* Function call is skipped due to function is undefined */} 1696 descriptor.modname = "mv_u3d_core"; 1696 descriptor.function = "mv_u3d_irq"; 1696 descriptor.filename = "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/10667/dscv_tempdir/dscv/ri/331_1a/drivers/usb/gadget/udc/mv_u3d_core.c"; 1696 descriptor.format = "vbus valid\n"; 1696 descriptor.lineno = 1696U; 1696 descriptor.flags = 0U; 1696 tmp = __builtin_expect(((long)(descriptor.flags)) & 1L, 0L) { /* Function call is skipped due to function is undefined */} 1698 u3d->usb_state = 2U; 1699 u3d->vbus_valid_detect = 0U; 1703 unsigned long __CPAchecker_TMP_2 = (unsigned long)(u3d->vbus); { } 1609 unsigned int tmp; 1610 unsigned int bit_pos; 1611 int i; 1612 int ep_num; 1613 int direction; 1614 struct mv_u3d_ep *curr_ep; 1615 struct mv_u3d_req *curr_req; 1616 struct mv_u3d_req *temp_req; 1617 int status; 1618 struct _ddebug descriptor; 1619 long tmp___0; 1620 struct _ddebug descriptor___0; 1621 long tmp___1; 1622 struct mv_u3d_req *req; 1623 const struct list_head *__mptr; 1624 int tmp___2; 1625 const struct list_head *__mptr___0; 1626 const struct list_head *__mptr___1; 1627 const struct list_head *__mptr___2; 1628 struct _ddebug descriptor___1; 1629 long tmp___3; 1610 ep_num = 0; 1610 direction = 0; 1615 tmp = ioread32((void *)(&(u3d->vuc_regs->endcomplete))) { /* Function call is skipped due to function is undefined */} 1617 descriptor.modname = "mv_u3d_core"; 1617 descriptor.function = "mv_u3d_irq_process_tr_complete"; 1617 descriptor.filename = "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/10667/dscv_tempdir/dscv/ri/331_1a/drivers/usb/gadget/udc/mv_u3d_core.c"; 1617 descriptor.format = "tr_complete: ep: 0x%x\n"; 1617 descriptor.lineno = 1617U; 1617 descriptor.flags = 0U; 1617 tmp___0 = __builtin_expect(((long)(descriptor.flags)) & 1L, 0L) { /* Function call is skipped due to function is 
undefined */} 1620 iowrite32(tmp, (void *)(&(u3d->vuc_regs->endcomplete))) { /* Function call is skipped due to function is undefined */} 1622 i = 0; 1622 goto ldv_34487; 1624 goto ldv_34486; 1623 ldv_34486:; 1623 ep_num = i >> 1; 1624 direction = i % 2; 1626 bit_pos = (u32 )(1 << ((direction * 16) + ep_num)); 1632 curr_ep = (u3d->eps) + 1UL; 1637 descriptor___0.modname = "mv_u3d_core"; 1637 descriptor___0.function = "mv_u3d_irq_process_tr_complete"; 1637 descriptor___0.filename = "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/10667/dscv_tempdir/dscv/ri/331_1a/drivers/usb/gadget/udc/mv_u3d_core.c"; 1637 descriptor___0.format = "tr comp: check req_list\n"; 1637 descriptor___0.lineno = 1637U; 1637 descriptor___0.flags = 0U; 1637 tmp___1 = __builtin_expect(((long)(descriptor___0.flags)) & 1L, 0L) { /* Function call is skipped due to function is undefined */} { 302 _raw_spin_lock(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */} 303 return ;; } { 189 union __anonunion___u_13 __u; { 220 switch (size); 221 assume(!(size == 1)); 221 assume(!(size == 2)); 221 assume(!(size == 4)); 221 assume(size == 8); 220 *((__u64 *)res) = *((volatile __u64 *)p); 220 goto ldv_880; 222 return ;; } 189 return ((unsigned long)((const struct list_head *)(__u.__val))) == ((unsigned long)head);; } { 347 _raw_spin_unlock(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */} 348 return ;; } 1649 __mptr___0 = (const struct list_head *)(curr_ep->queue.next); 1649 curr_req = ((struct mv_u3d_req *)__mptr___0) + 18446744073709551520UL; 1649 __mptr___1 = (const struct list_head *)(curr_req->queue.next); 1649 temp_req = ((struct mv_u3d_req *)__mptr___1) + 18446744073709551520UL; 1649 goto ldv_34484; 1655 ldv_34482:; 1666 descriptor___1.modname = "mv_u3d_core"; 1666 descriptor___1.function = "mv_u3d_irq_process_tr_complete"; 1666 descriptor___1.filename = "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/10667/dscv_tempdir/dscv/ri/331_1a/drivers/usb/gadget/udc/mv_u3d_core.c"; 1666 descriptor___1.format = "call mv_u3d_start_queue from ep complete\n"; 1666 descriptor___1.lineno = 1666U; 1666 descriptor___1.flags = 0U; 1666 tmp___3 = __builtin_expect(((long)(descriptor___1.flags)) & 1L, 0L) { /* Function call is skipped due to function is undefined */} { 476 struct mv_u3d *u3d; 477 struct mv_u3d_req *req; 478 int ret; 479 const struct list_head *__mptr; 480 int tmp; 481 int tmp___0; 476 u3d = ep->u3d; { 189 union __anonunion___u_13 __u; { 220 switch (size); 221 assume(!(size == 1)); 221 assume(!(size == 2)); 221 assume(!(size == 4)); 221 assume(size == 8); 220 *((__u64 *)res) = *((volatile __u64 *)p); 220 goto ldv_880; 222 return ;; } 189 return ((unsigned long)((const struct list_head *)(__u.__val))) == ((unsigned long)head);; } 481 __mptr = (const struct list_head *)(ep->req_list.next); 481 req = ((struct mv_u3d_req *)__mptr) + 18446744073709551504UL; 485 ep->processing = 1U; 488 int __CPAchecker_TMP_0; 488 int __CPAchecker_TMP_1 = (int)(ep->u3d->ep0_dir); 488 __CPAchecker_TMP_0 = __CPAchecker_TMP_1; 488 ret = usb_gadget_map_request(&(u3d->gadget), &(req->req), __CPAchecker_TMP_0) { /* Function call is skipped due to function is undefined */} 493 req->req.status = -115; 494 req->req.actual = 0U; 495 req->trb_count = 0U; { 406 unsigned int 
count; 407 int is_last; 408 struct mv_u3d_trb *trb; 409 struct mv_u3d_trb_hw *trb_hw; 410 struct mv_u3d *u3d; 411 unsigned long long dma; 412 unsigned int length; 413 unsigned int trb_num; 414 void *tmp; 415 void *tmp___0; 416 int tmp___1; 417 const struct list_head *__mptr; 415 u3d = req->ep->u3d; { 27 union __anonunion___u_9 __u; 27 __u.__val = list; { 245 switch (size); 246 assume(!(size == 1)); 247 assume(!(size == 2)); 248 assume(!(size == 4)); 249 assume(size == 8); 249 *((volatile __u64 *)p) = *((__u64 *)res); 249 goto ldv_902; 256 return ;; } 28 list->prev = list; 29 return ;; } 419 length = (req->req.length) - (req->req.actual); 430 trb_num = length / 65536U; { 581 void *tmp; { 566 void *tmp___0; 566 assume(size != 0UL); 566 assume(!((18446744073709551615UL / size) < n)); 570 tmp___0 = __kmalloc(n * size, flags) { /* Function call is skipped due to function is undefined */} 570 return tmp___0;; } 581 return tmp;; } 434 trb = (struct mv_u3d_trb *)tmp; { 581 void *tmp; { 566 void *tmp___0; 566 assume(size != 0UL); 566 assume(!((18446744073709551615UL / size) < n)); 570 tmp___0 = __kmalloc(n * size, flags) { /* Function call is skipped due to function is undefined */} 570 return tmp___0;; } 581 return tmp;; } 438 trb_hw = (struct mv_u3d_trb_hw *)tmp___0; 445 ldv_34100:; 445 trb->trb_hw = trb_hw; { 340 unsigned int temp; 341 unsigned int direction; 342 struct mv_u3d *u3d; 343 unsigned int _min1; 344 unsigned int _min2; 345 struct _ddebug descriptor; 346 long tmp; 345 _min1 = (req->req.length) - (req->req.actual); 345 _min2 = 65536U; 345 unsigned int __CPAchecker_TMP_0; 345 __CPAchecker_TMP_0 = _min2; 345 *length = __CPAchecker_TMP_0; 348 u3d = req->ep->u3d; 350 trb->trb_dma = 0ULL; 353 temp = ((unsigned int)(req->req.dma)) + (req->req.actual); 355 trb->trb_hw->buf_addr_lo = temp; 356 trb->trb_hw->buf_addr_hi = 0U; 357 trb->trb_hw->trb_len = *length; 358 trb->trb_hw->ctrl.own = 1U; 360 unsigned short *__CPAchecker_TMP_1 = (unsigned short *)(req->ep); 361 trb->trb_hw->ctrl.type = 3U; 365 req->req.actual = (req->req.actual) + (*length); 367 unsigned int __CPAchecker_TMP_3; 367 unsigned short *__CPAchecker_TMP_4 = (unsigned short *)(req->ep); 367 __CPAchecker_TMP_3 = req->ep->u3d->ep0_dir; 367 direction = __CPAchecker_TMP_3; 371 trb->trb_hw->ctrl.dir = 0U; 380 *is_last = 1; 386 trb->trb_hw->ctrl.ioc = 1U; 389 trb->trb_hw->ctrl.chain = 0U; 395 Ignored inline assembler code} { 77 __list_add(new, head->prev, head) { /* Function call is skipped due to function is undefined */} 78 return ;; } 455 req->trb_count = (req->trb_count) + 1U; 456 trb = trb + 1; 457 trb_hw = trb_hw + 1; 460 __mptr = (const struct list_head *)(req->trb_list.next); 460 req->trb_head = ((struct mv_u3d_trb *)__mptr) + 18446744073709551600UL; 462 void *__CPAchecker_TMP_1 = (void *)(req->trb_head->trb_hw); 462 -dma_map_single_attrs(u3d->gadget.dev.parent, __CPAchecker_TMP_1, ((unsigned long)trb_num) * 16UL, 0, 0UL) { 38 unsigned long long tmp; { } 173 struct dma_map_ops *ops; 174 struct dma_map_ops *tmp; 175 unsigned long long addr; 176 int tmp___0; 177 long tmp___1; 178 unsigned long tmp___2; 179 unsigned long tmp___3; { 32 long tmp; 35 tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */} 35 assume(!(tmp != 0L)); 35 assume(!(((unsigned long)(dev->archdata.dma_ops)) == ((unsigned long)((struct dma_map_ops *)0)))); 38 return dev->archdata.dma_ops;; } 174 ops = tmp; { 133 return ;; } { 127 int __CPAchecker_TMP_0; 127 
assume(dma_direction == 0); __CPAchecker_TMP_0 = 1; 127 return __CPAchecker_TMP_0;; } 178 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */} 178 assume(!(tmp___1 != 0L)); 179 tmp___2 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */} 179 addr = (*(ops->map_page))(dev, ((struct page *)-24189255811072L) + (tmp___2 >> 12), ((unsigned long)ptr) & 4095UL, size, dir, attrs); 182 tmp___3 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */} 182 debug_dma_map_page(dev, ((struct page *)-24189255811072L) + (tmp___3 >> 12), ((unsigned long)ptr) & 4095UL, size, (int)dir, addr, 1) { /* Function call is skipped due to function is undefined */} 185 return addr;; } 467 req->chain = 1U; } { 225 unsigned int tmp; 226 unsigned int direction; 227 struct mv_u3d *u3d; 228 struct mv_u3d_ep_context *ep_context; 229 int retval; 230 int __ret_warn_on; 231 long tmp___0; 232 int tmp___1; 228 retval = 0; 230 u3d = ep->u3d; 231 unsigned int __CPAchecker_TMP_0; 231 __CPAchecker_TMP_0 = ep->u3d->ep0_dir; 231 direction = __CPAchecker_TMP_0; 235 ep_context = (u3d->ep_context) + 1UL; { 189 union __anonunion___u_13 __u; { 220 switch (size); 221 assume(!(size == 1)); 221 assume(!(size == 2)); 221 assume(!(size == 4)); 221 assume(size == 8); 220 *((__u64 *)res) = *((volatile __u64 *)p); 220 goto ldv_880; 222 return ;; } 189 return ((unsigned long)((const struct list_head *)(__u.__val))) == ((unsigned long)head);; } 245 ep_context->rsvd0 = 1U; 246 ep_context->rsvd1 = 0U; 251 unsigned int __CPAchecker_TMP_3 = (unsigned int)(req->trb_head->trb_dma); 251 ep_context->trb_addr_lo = __CPAchecker_TMP_3 | 1U; 253 ep_context->trb_addr_hi = 0U; 258 Ignored inline assembler code 262 tmp = 1U; 267 iowrite32(tmp, (void *)(&(u3d->op_regs->doorbell))) { /* Function call is skipped due to function is undefined */} } { } 77 __list_add(new, head->prev, head) { /* Function call is skipped due to function is undefined */} 78 return ;; } 1668 ldv_34471:; 1622 i = i + 1; 1623 ldv_34487:; 1624 goto ldv_34486; 1623 ldv_34486:; 1623 ep_num = i >> 1; 1624 direction = i % 2; 1626 bit_pos = (u32 )(1 << ((direction * 16) + ep_num)); 1634 curr_ep = (u3d->eps) + ((unsigned long)i); 1637 descriptor___0.modname = "mv_u3d_core"; 1637 descriptor___0.function = "mv_u3d_irq_process_tr_complete"; 1637 descriptor___0.filename = "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/10667/dscv_tempdir/dscv/ri/331_1a/drivers/usb/gadget/udc/mv_u3d_core.c"; 1637 descriptor___0.format = "tr comp: check req_list\n"; 1637 descriptor___0.lineno = 1637U; 1637 descriptor___0.flags = 0U; 1637 tmp___1 = __builtin_expect(((long)(descriptor___0.flags)) & 1L, 0L) { /* Function call is skipped due to function is undefined */} { 302 _raw_spin_lock(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */} 303 return ;; } { 189 union __anonunion___u_13 __u; { 220 switch (size); 221 assume(!(size == 1)); 221 assume(!(size == 2)); 221 assume(!(size == 4)); 221 assume(size == 8); 220 *((__u64 *)res) = *((volatile __u64 *)p); 220 goto ldv_880; 222 return ;; } 189 return ((unsigned long)((const struct list_head *)(__u.__val))) == ((unsigned long)head);; } { 347 _raw_spin_unlock(&(lock->__annonCompField20.rlock)) { /* Function call is skipped due to function is undefined */} 348 return ;; } 1649 __mptr___0 = (const 
struct list_head *)(curr_ep->queue.next); 1649 curr_req = ((struct mv_u3d_req *)__mptr___0) + 18446744073709551520UL; 1649 __mptr___1 = (const struct list_head *)(curr_req->queue.next); 1649 temp_req = ((struct mv_u3d_req *)__mptr___1) + 18446744073709551520UL; 1649 goto ldv_34484; 1655 ldv_34482:; 1666 descriptor___1.modname = "mv_u3d_core"; 1666 descriptor___1.function = "mv_u3d_irq_process_tr_complete"; 1666 descriptor___1.filename = "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/10667/dscv_tempdir/dscv/ri/331_1a/drivers/usb/gadget/udc/mv_u3d_core.c"; 1666 descriptor___1.format = "call mv_u3d_start_queue from ep complete\n"; 1666 descriptor___1.lineno = 1666U; 1666 descriptor___1.flags = 0U; 1666 tmp___3 = __builtin_expect(((long)(descriptor___1.flags)) & 1L, 0L) { /* Function call is skipped due to function is undefined */} { } 476 struct mv_u3d *u3d; 477 struct mv_u3d_req *req; 478 int ret; 479 const struct list_head *__mptr; 480 int tmp; 481 int tmp___0; 476 u3d = ep->u3d; { 189 union __anonunion___u_13 __u; { 220 switch (size); 221 assume(!(size == 1)); 221 assume(!(size == 2)); 221 assume(!(size == 4)); 221 assume(size == 8); 220 *((__u64 *)res) = *((volatile __u64 *)p); 220 goto ldv_880; 222 return ;; } 189 return ((unsigned long)((const struct list_head *)(__u.__val))) == ((unsigned long)head);; } 481 __mptr = (const struct list_head *)(ep->req_list.next); 481 req = ((struct mv_u3d_req *)__mptr) + 18446744073709551504UL; 485 ep->processing = 1U; 488 int __CPAchecker_TMP_0; 488 int __CPAchecker_TMP_1 = (int)(ep->u3d->ep0_dir); 488 __CPAchecker_TMP_0 = __CPAchecker_TMP_1; 488 ret = usb_gadget_map_request(&(u3d->gadget), &(req->req), __CPAchecker_TMP_0) { /* Function call is skipped due to function is undefined */} 493 req->req.status = -115; 494 req->req.actual = 0U; 495 req->trb_count = 0U; { } 406 unsigned int count; 407 int is_last; 408 struct mv_u3d_trb *trb; 409 struct mv_u3d_trb_hw *trb_hw; 410 struct mv_u3d *u3d; 411 unsigned long long dma; 412 unsigned int length; 413 unsigned int trb_num; 414 void *tmp; 415 void *tmp___0; 416 int tmp___1; 417 const struct list_head *__mptr; 415 u3d = req->ep->u3d; { 27 union __anonunion___u_9 __u; 27 __u.__val = list; { 245 switch (size); 246 assume(!(size == 1)); 247 assume(!(size == 2)); 248 assume(!(size == 4)); 249 assume(size == 8); 249 *((volatile __u64 *)p) = *((__u64 *)res); 249 goto ldv_902; 256 return ;; } 28 list->prev = list; 29 return ;; } 419 length = (req->req.length) - (req->req.actual); 430 trb_num = length / 65536U; 432 trb_num = trb_num + 1U; { 581 void *tmp; { 566 void *tmp___0; 566 assume(size != 0UL); 566 assume(!((18446744073709551615UL / size) < n)); 570 tmp___0 = __kmalloc(n * size, flags) { /* Function call is skipped due to function is undefined */} 570 return tmp___0;; } 581 return tmp;; } 434 trb = (struct mv_u3d_trb *)tmp; { 581 void *tmp; { 566 void *tmp___0; 566 assume(size != 0UL); 566 assume(!((18446744073709551615UL / size) < n)); 570 tmp___0 = __kmalloc(n * size, flags) { /* Function call is skipped due to function is undefined */} 570 return tmp___0;; } 581 return tmp;; } 438 trb_hw = (struct mv_u3d_trb_hw *)tmp___0; 445 ldv_34100:; 445 trb->trb_hw = trb_hw; { 340 unsigned int temp; 341 unsigned int direction; 342 struct mv_u3d *u3d; 343 unsigned int _min1; 344 unsigned int _min2; 345 struct _ddebug descriptor; 346 long tmp; 345 _min1 = (req->req.length) - (req->req.actual); 345 _min2 = 65536U; 345 
unsigned int __CPAchecker_TMP_0; 345 __CPAchecker_TMP_0 = _min2; 345 *length = __CPAchecker_TMP_0; 348 u3d = req->ep->u3d; 350 trb->trb_dma = 0ULL; 353 temp = ((unsigned int)(req->req.dma)) + (req->req.actual); 355 trb->trb_hw->buf_addr_lo = temp; 356 trb->trb_hw->buf_addr_hi = 0U; 357 trb->trb_hw->trb_len = *length; 358 trb->trb_hw->ctrl.own = 1U; 360 unsigned short *__CPAchecker_TMP_1 = (unsigned short *)(req->ep); 363 trb->trb_hw->ctrl.type = 1U; 365 req->req.actual = (req->req.actual) + (*length); 367 unsigned int __CPAchecker_TMP_3; 367 unsigned short *__CPAchecker_TMP_4 = (unsigned short *)(req->ep); 367 __CPAchecker_TMP_3 = req->ep->direction; 367 direction = __CPAchecker_TMP_3; 369 trb->trb_hw->ctrl.dir = 1U; 376 *is_last = 1; 389 trb->trb_hw->ctrl.chain = 0U; 395 Ignored inline assembler code} { 77 __list_add(new, head->prev, head) { /* Function call is skipped due to function is undefined */} 78 return ;; } 455 req->trb_count = (req->trb_count) + 1U; 456 trb = trb + 1; 457 trb_hw = trb_hw + 1; 460 __mptr = (const struct list_head *)(req->trb_list.next); 460 req->trb_head = ((struct mv_u3d_trb *)__mptr) + 18446744073709551600UL; 462 void *__CPAchecker_TMP_1 = (void *)(req->trb_head->trb_hw); 462 -dma_map_single_attrs(u3d->gadget.dev.parent, __CPAchecker_TMP_1, ((unsigned long)trb_num) * 16UL, 0, 0UL) } | Source code
1 #ifndef _ASM_X86_DMA_MAPPING_H
2 #define _ASM_X86_DMA_MAPPING_H
3
4 /*
5 * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
6 * Documentation/DMA-API.txt for documentation.
7 */
8
9 #include <linux/kmemcheck.h>
10 #include <linux/scatterlist.h>
11 #include <linux/dma-debug.h>
12 #include <asm/io.h>
13 #include <asm/swiotlb.h>
14 #include <linux/dma-contiguous.h>
15
16 #ifdef CONFIG_ISA
17 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
18 #else
19 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
20 #endif
21
22 #define DMA_ERROR_CODE 0
23
24 extern int iommu_merge;
25 extern struct device x86_dma_fallback_dev;
26 extern int panic_on_overflow;
27
28 extern struct dma_map_ops *dma_ops;
29
30 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
31 {
32 #ifndef CONFIG_X86_DEV_DMA_OPS
33 return dma_ops;
34 #else
35 if (unlikely(!dev) || !dev->archdata.dma_ops)
36 return dma_ops;
37 else
38 return dev->archdata.dma_ops;
39 #endif
40 }
41
42 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
43 #define arch_dma_alloc_attrs arch_dma_alloc_attrs
44
45 #define HAVE_ARCH_DMA_SUPPORTED 1
46 extern int dma_supported(struct device *hwdev, u64 mask);
47
48 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
49 dma_addr_t *dma_addr, gfp_t flag,
50 unsigned long attrs);
51
52 extern void dma_generic_free_coherent(struct device *dev, size_t size,
53 void *vaddr, dma_addr_t dma_addr,
54 unsigned long attrs);
55
56 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
57 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
58 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
59 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
60 #else
61
62 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
63 {
64 if (!dev->dma_mask)
65 return 0;
66
67 return addr + size - 1 <= *dev->dma_mask;
68 }
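The range check above is small enough to exercise in isolation. A standalone sketch (not kernel code), assuming an ordinary 32-bit DMA mask:

/* Standalone sketch: a buffer is DMA-capable when its last byte
 * still fits under the device's DMA mask. */
#include <stdio.h>
#include <stdint.h>

static int dma_capable(uint64_t dma_mask, uint64_t addr, uint64_t size)
{
	if (!dma_mask)
		return 0;
	return addr + size - 1 <= dma_mask;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;	/* 32-bit DMA mask */

	printf("%d\n", dma_capable(mask32, 0xfffff000ULL, 0x1000)); /* 1: last byte is 0xffffffff */
	printf("%d\n", dma_capable(mask32, 0xfffff001ULL, 0x1000)); /* 0: crosses the mask */
	return 0;
}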
69
70 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
71 {
72 return paddr;
73 }
74
75 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
76 {
77 return daddr;
78 }
79 #endif /* CONFIG_X86_DMA_REMAP */
80
81 static inline void
82 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
83 enum dma_data_direction dir)
84 {
85 flush_write_buffers();
86 }
87
88 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
89 gfp_t gfp)
90 {
91 unsigned long dma_mask = 0;
92
93 dma_mask = dev->coherent_dma_mask;
94 if (!dma_mask)
95 dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
96
97 return dma_mask;
98 }
99
100 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
101 {
102 unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
103
104 if (dma_mask <= DMA_BIT_MASK(24))
105 gfp |= GFP_DMA;
106 #ifdef CONFIG_X86_64
107 if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
108 gfp |= GFP_DMA32;
109 #endif
110 return gfp;
111 }
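The mask-to-GFP mapping above can be checked outside the kernel. A minimal standalone sketch; the GFP_DMA/GFP_DMA32 values here are placeholders, not the real kernel flag bits:

/* Standalone sketch of the mask-to-GFP selection in
 * dma_alloc_coherent_gfp_flags(); flag values are placeholders. */
#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n)  (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

#define GFP_DMA    0x01u	/* placeholder value */
#define GFP_DMA32  0x04u	/* placeholder value */

static unsigned int pick_gfp(uint64_t coherent_mask, unsigned int gfp)
{
	if (coherent_mask <= DMA_BIT_MASK(24))
		gfp |= GFP_DMA;
	/* mirrors the CONFIG_X86_64 branch in the header above */
	if (coherent_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
	return gfp;
}

int main(void)
{
	printf("24-bit mask -> gfp 0x%x\n", pick_gfp(DMA_BIT_MASK(24), 0));
	printf("32-bit mask -> gfp 0x%x\n", pick_gfp(DMA_BIT_MASK(32), 0));
	printf("64-bit mask -> gfp 0x%x\n", pick_gfp(DMA_BIT_MASK(64), 0));
	return 0;
}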
112
113 #endif
1
2 /*
3 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 */
9
10 #include <linux/module.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmapool.h>
13 #include <linux/kernel.h>
14 #include <linux/delay.h>
15 #include <linux/ioport.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/errno.h>
19 #include <linux/timer.h>
20 #include <linux/list.h>
21 #include <linux/notifier.h>
22 #include <linux/interrupt.h>
23 #include <linux/moduleparam.h>
24 #include <linux/device.h>
25 #include <linux/usb/ch9.h>
26 #include <linux/usb/gadget.h>
27 #include <linux/pm.h>
28 #include <linux/io.h>
29 #include <linux/irq.h>
30 #include <linux/platform_device.h>
31 #include <linux/platform_data/mv_usb.h>
32 #include <linux/clk.h>
33
34 #include "mv_u3d.h"
35
36 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
37
38 static const char driver_name[] = "mv_u3d";
39 static const char driver_desc[] = DRIVER_DESC;
40
41 static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status);
42 static void mv_u3d_stop_activity(struct mv_u3d *u3d,
43 struct usb_gadget_driver *driver);
44
45 /* for endpoint 0 operations */
46 static const struct usb_endpoint_descriptor mv_u3d_ep0_desc = {
47 .bLength = USB_DT_ENDPOINT_SIZE,
48 .bDescriptorType = USB_DT_ENDPOINT,
49 .bEndpointAddress = 0,
50 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
51 .wMaxPacketSize = MV_U3D_EP0_MAX_PKT_SIZE,
52 };
53
54 static void mv_u3d_ep0_reset(struct mv_u3d *u3d)
55 {
56 struct mv_u3d_ep *ep;
57 u32 epxcr;
58 int i;
59
60 for (i = 0; i < 2; i++) {
61 ep = &u3d->eps[i];
62 ep->u3d = u3d;
63
64 /* ep0 ep context, ep0 in and out share the same ep context */
65 ep->ep_context = &u3d->ep_context[1];
66 }
67
68 /* reset ep state machine */
69 /* reset ep0 out */
70 epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
71 epxcr |= MV_U3D_EPXCR_EP_INIT;
72 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
73 udelay(5);
74 epxcr &= ~MV_U3D_EPXCR_EP_INIT;
75 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
76
77 epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
78 << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
79 | (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
80 | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
81 | MV_U3D_EPXCR_EP_TYPE_CONTROL);
82 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr1);
83
84 /* reset ep0 in */
85 epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
86 epxcr |= MV_U3D_EPXCR_EP_INIT;
87 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
88 udelay(5);
89 epxcr &= ~MV_U3D_EPXCR_EP_INIT;
90 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
91
92 epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
93 << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
94 | (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
95 | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
96 | MV_U3D_EPXCR_EP_TYPE_CONTROL);
97 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr1);
98 }
99
100 static void mv_u3d_ep0_stall(struct mv_u3d *u3d)
101 {
102 u32 tmp;
103 dev_dbg(u3d->dev, "%s\n", __func__);
104
105 /* set TX and RX to stall */
106 tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
107 tmp |= MV_U3D_EPXCR_EP_HALT;
108 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
109
110 tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
111 tmp |= MV_U3D_EPXCR_EP_HALT;
112 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
113
114 /* update ep0 state */
115 u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
116 u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
117 }
118
119 static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index,
120 struct mv_u3d_req *curr_req)
121 {
122 struct mv_u3d_trb *curr_trb;
123 int actual, remaining_length = 0;
124 int direction, ep_num;
125 int retval = 0;
126 u32 tmp, status, length;
127
128 direction = index % 2;
129 ep_num = index / 2;
130
131 actual = curr_req->req.length;
132
133 while (!list_empty(&curr_req->trb_list)) {
134 curr_trb = list_entry(curr_req->trb_list.next,
135 struct mv_u3d_trb, trb_list);
136 if (!curr_trb->trb_hw->ctrl.own) {
137 dev_err(u3d->dev, "%s, TRB own error!\n",
138 u3d->eps[index].name);
139 return 1;
140 }
141
142 curr_trb->trb_hw->ctrl.own = 0;
143 if (direction == MV_U3D_EP_DIR_OUT)
144 tmp = ioread32(&u3d->vuc_regs->rxst[ep_num].statuslo);
145 else
146 tmp = ioread32(&u3d->vuc_regs->txst[ep_num].statuslo);
147
148 status = tmp >> MV_U3D_XFERSTATUS_COMPLETE_SHIFT;
149 length = tmp & MV_U3D_XFERSTATUS_TRB_LENGTH_MASK;
150
151 if (status == MV_U3D_COMPLETE_SUCCESS ||
152 (status == MV_U3D_COMPLETE_SHORT_PACKET &&
153 direction == MV_U3D_EP_DIR_OUT)) {
154 remaining_length += length;
155 actual -= remaining_length;
156 } else {
157 dev_err(u3d->dev,
158 "complete_tr error: ep=%d %s: error = 0x%x\n",
159 index >> 1, direction ? "SEND" : "RECV",
160 status);
161 retval = -EPROTO;
162 }
163
164 list_del_init(&curr_trb->trb_list);
165 }
166 if (retval)
167 return retval;
168
169 curr_req->req.actual = actual;
170 return 0;
171 }
172
173 /*
174 * mv_u3d_done() - retire a request; caller blocked irqs
175 * @status : request status to be set, only works when
176 * request is still in progress.
177 */
178 static
179 void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status)
180 __releases(&ep->udc->lock)
181 __acquires(&ep->udc->lock)
182 {
183 struct mv_u3d *u3d = (struct mv_u3d *)ep->u3d;
184
185 dev_dbg(u3d->dev, "mv_u3d_done: remove req->queue\n");
186 /* Remove the req from the ep queue */
187 list_del_init(&req->queue);
188
189 /* req.status should be set as -EINPROGRESS in ep_queue() */
190 if (req->req.status == -EINPROGRESS)
191 req->req.status = status;
192 else
193 status = req->req.status;
194
195 /* Free trb for the request */
196 if (!req->chain)
197 dma_pool_free(u3d->trb_pool,
198 req->trb_head->trb_hw, req->trb_head->trb_dma);
199 else {
200 dma_unmap_single(ep->u3d->gadget.dev.parent,
201 (dma_addr_t)req->trb_head->trb_dma,
202 req->trb_count * sizeof(struct mv_u3d_trb_hw),
203 DMA_BIDIRECTIONAL);
204 kfree(req->trb_head->trb_hw);
205 }
206 kfree(req->trb_head);
207
208 usb_gadget_unmap_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep));
209
210 if (status && (status != -ESHUTDOWN)) {
211 dev_dbg(u3d->dev, "complete %s req %p stat %d len %u/%u",
212 ep->ep.name, &req->req, status,
213 req->req.actual, req->req.length);
214 }
215
216 spin_unlock(&ep->u3d->lock);
217
218 usb_gadget_giveback_request(&ep->ep, &req->req);
219
220 spin_lock(&ep->u3d->lock);
221 }
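Note that mv_u3d_done() drops the controller lock around usb_gadget_giveback_request(), because the gadget's completion callback may re-enter the UDC (for example to queue the next request). A standalone pthread sketch of the same drop-lock-around-callback pattern; the names and the callback are illustrative, not part of the driver (link with -pthread):

/* Sketch: release the lock before calling out to a user callback that
 * may take the same lock again, then re-acquire it afterwards. */
#include <pthread.h>
#include <stdio.h>

struct ep_stub {
	pthread_mutex_t lock;
	void (*complete)(struct ep_stub *ep);	/* user-supplied callback */
};

/* Callback that re-enters the "driver" and needs the lock itself. */
static void user_complete(struct ep_stub *ep)
{
	pthread_mutex_lock(&ep->lock);
	puts("callback re-acquired the lock without deadlocking");
	pthread_mutex_unlock(&ep->lock);
}

static void request_done(struct ep_stub *ep)
{
	pthread_mutex_lock(&ep->lock);
	/* ... retire the request while holding the lock ... */
	pthread_mutex_unlock(&ep->lock);	/* drop before calling out */
	ep->complete(ep);			/* may re-enter us */
	pthread_mutex_lock(&ep->lock);		/* re-take, as mv_u3d_done() does */
	pthread_mutex_unlock(&ep->lock);
}

int main(void)
{
	struct ep_stub ep = { PTHREAD_MUTEX_INITIALIZER, user_complete };

	request_done(&ep);
	return 0;
}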
222
223 static int mv_u3d_queue_trb(struct mv_u3d_ep *ep, struct mv_u3d_req *req)
224 {
225 u32 tmp, direction;
226 struct mv_u3d *u3d;
227 struct mv_u3d_ep_context *ep_context;
228 int retval = 0;
229
230 u3d = ep->u3d;
231 direction = mv_u3d_ep_dir(ep);
232
233 /* ep0 in and out share the same ep context slot 1 */
234 if (ep->ep_num == 0)
235 ep_context = &(u3d->ep_context[1]);
236 else
237 ep_context = &(u3d->ep_context[ep->ep_num * 2 + direction]);
238
239 /* check if the pipe is empty or not */
240 if (!list_empty(&ep->queue)) {
241 dev_err(u3d->dev, "add trb to non-empty queue!\n");
242 retval = -ENOMEM;
243 WARN_ON(1);
244 } else {
245 ep_context->rsvd0 = cpu_to_le32(1);
246 ep_context->rsvd1 = 0;
247
248 /* Configure the trb address and set the DCS bit.
249 * Both DCS bit and own bit in trb should be set.
250 */
251 ep_context->trb_addr_lo =
252 cpu_to_le32(req->trb_head->trb_dma | DCS_ENABLE);
253 ep_context->trb_addr_hi = 0;
254
255 /* Ensure that updates to the EP Context will
256 * occur before Ring Bell.
257 */
258 wmb();
259
260 /* ring bell the ep */
261 if (ep->ep_num == 0)
262 tmp = 0x1;
263 else
264 tmp = ep->ep_num * 2
265 + ((direction == MV_U3D_EP_DIR_OUT) ? 0 : 1);
266
267 iowrite32(tmp, &u3d->op_regs->doorbell);
268 }
269 return retval;
270 }
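The doorbell value written above uses the same slot numbering as the ep_context array: ep0 uses slot 1, every other endpoint uses ep_num * 2 + direction. A standalone sketch of that mapping, assuming MV_U3D_EP_DIR_OUT/IN encode as 0/1 as elsewhere in the driver:

/* Sketch of the endpoint-to-slot math used by mv_u3d_queue_trb(). */
#include <stdio.h>

enum { DIR_OUT = 0, DIR_IN = 1 };	/* assumed encoding of mv_u3d_ep_dir() */

static unsigned int ep_slot(unsigned int ep_num, unsigned int dir)
{
	return ep_num == 0 ? 1 : ep_num * 2 + dir;
}

int main(void)
{
	printf("ep0        -> slot %u\n", ep_slot(0, DIR_OUT));
	printf("ep1 out/in -> slots %u/%u\n", ep_slot(1, DIR_OUT), ep_slot(1, DIR_IN));
	printf("ep2 out/in -> slots %u/%u\n", ep_slot(2, DIR_OUT), ep_slot(2, DIR_IN));
	return 0;
}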
271
272 static struct mv_u3d_trb *mv_u3d_build_trb_one(struct mv_u3d_req *req,
273 unsigned *length, dma_addr_t *dma)
274 {
275 u32 temp;
276 unsigned int direction;
277 struct mv_u3d_trb *trb;
278 struct mv_u3d_trb_hw *trb_hw;
279 struct mv_u3d *u3d;
280
281 /* how big will this transfer be? */
282 *length = req->req.length - req->req.actual;
283 BUG_ON(*length > (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
284
285 u3d = req->ep->u3d;
286
287 trb = kzalloc(sizeof(*trb), GFP_ATOMIC);
288 if (!trb)
289 return NULL;
290
291 /*
292 * Be careful that no __GFP_HIGHMEM is set,
293 * or we cannot use dma_to_virt;
294 * GFP_KERNEL cannot be used inside a spinlock.
295 */
296 trb_hw = dma_pool_alloc(u3d->trb_pool, GFP_ATOMIC, dma);
297 if (!trb_hw) {
298 kfree(trb);
299 dev_err(u3d->dev,
300 "%s, dma_pool_alloc fail\n", __func__);
301 return NULL;
302 }
303 trb->trb_dma = *dma;
304 trb->trb_hw = trb_hw;
305
306 /* initialize buffer page pointers */
307 temp = (u32)(req->req.dma + req->req.actual);
308
309 trb_hw->buf_addr_lo = cpu_to_le32(temp);
310 trb_hw->buf_addr_hi = 0;
311 trb_hw->trb_len = cpu_to_le32(*length);
312 trb_hw->ctrl.own = 1;
313
314 if (req->ep->ep_num == 0)
315 trb_hw->ctrl.type = TYPE_DATA;
316 else
317 trb_hw->ctrl.type = TYPE_NORMAL;
318
319 req->req.actual += *length;
320
321 direction = mv_u3d_ep_dir(req->ep);
322 if (direction == MV_U3D_EP_DIR_IN)
323 trb_hw->ctrl.dir = 1;
324 else
325 trb_hw->ctrl.dir = 0;
326
327 /* Enable interrupt for the last trb of a request */
328 if (!req->req.no_interrupt)
329 trb_hw->ctrl.ioc = 1;
330
331 trb_hw->ctrl.chain = 0;
332
333 wmb();
334 return trb;
335 }
336
337 static int mv_u3d_build_trb_chain(struct mv_u3d_req *req, unsigned *length,
338 struct mv_u3d_trb *trb, int *is_last)
339 {
340 u32 temp;
341 unsigned int direction;
342 struct mv_u3d *u3d;
343
344 /* how big will this transfer be? */
345 *length = min(req->req.length - req->req.actual,
346 (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
347
348 u3d = req->ep->u3d;
349
350 trb->trb_dma = 0;
351
352 /* initialize buffer page pointers */
353 temp = (u32)(req->req.dma + req->req.actual);
354
355 trb->trb_hw->buf_addr_lo = cpu_to_le32(temp);
356 trb->trb_hw->buf_addr_hi = 0;
357 trb->trb_hw->trb_len = cpu_to_le32(*length);
358 trb->trb_hw->ctrl.own = 1;
359
360 if (req->ep->ep_num == 0)
361 trb->trb_hw->ctrl.type = TYPE_DATA;
362 else
363 trb->trb_hw->ctrl.type = TYPE_NORMAL;
364
365 req->req.actual += *length;
366
367 direction = mv_u3d_ep_dir(req->ep);
368 if (direction == MV_U3D_EP_DIR_IN)
369 trb->trb_hw->ctrl.dir = 1;
370 else
371 trb->trb_hw->ctrl.dir = 0;
372
373 /* zlp is needed if req->req.zero is set */
374 if (req->req.zero) {
375 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
376 *is_last = 1;
377 else
378 *is_last = 0;
379 } else if (req->req.length == req->req.actual)
380 *is_last = 1;
381 else
382 *is_last = 0;
383
384 /* Enable interrupt for the last trb of a request */
385 if (*is_last && !req->req.no_interrupt)
386 trb->trb_hw->ctrl.ioc = 1;
387
388 if (*is_last)
389 trb->trb_hw->ctrl.chain = 0;
390 else {
391 trb->trb_hw->ctrl.chain = 1;
392 dev_dbg(u3d->dev, "chain trb\n");
393 }
394
395 wmb();
396
397 return 0;
398 }
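The is_last decision above determines whether a trailing zero-length packet is still owed at the end of the request. A standalone sketch of the same rule, with hypothetical request sizes:

/* Sketch: is the current chunk the last TRB of the request? */
#include <stdio.h>

static int chunk_is_last(unsigned int length, unsigned int actual,
			 unsigned int chunk, unsigned int maxpacket, int zero)
{
	if (zero)	/* zlp requested: only a short or empty chunk terminates */
		return chunk == 0 || (chunk % maxpacket) != 0;
	return actual + chunk == length;	/* otherwise: all bytes consumed */
}

int main(void)
{
	/* 1536-byte request on a 512-byte endpoint, no zlp requested */
	printf("%d\n", chunk_is_last(1536, 1024, 512, 512, 0));	/* 1: done */
	/* same request with zero set: the full-sized final chunk is not last */
	printf("%d\n", chunk_is_last(1536, 1024, 512, 512, 1));	/* 0 */
	printf("%d\n", chunk_is_last(1536, 1536, 0, 512, 1));		/* 1: the zlp */
	return 0;
}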
399
400 /* generate TRB linked list for a request
401 * the usb controller only supports a continuous trb chain,
402 * so the trb structures' physical addresses must be contiguous.
403 */
404 static int mv_u3d_req_to_trb(struct mv_u3d_req *req)
405 {
406 unsigned count;
407 int is_last;
408 struct mv_u3d_trb *trb;
409 struct mv_u3d_trb_hw *trb_hw;
410 struct mv_u3d *u3d;
411 dma_addr_t dma;
412 unsigned length;
413 unsigned trb_num;
414
415 u3d = req->ep->u3d;
416
417 INIT_LIST_HEAD(&req->trb_list);
418
419 length = req->req.length - req->req.actual;
420 /* normally the request transfer length is less than 16KB.
421 * we use mv_u3d_build_trb_one() to optimize it.
422 */
423 if (length <= (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER) {
424 trb = mv_u3d_build_trb_one(req, &count, &dma);
425 list_add_tail(&trb->trb_list, &req->trb_list);
426 req->trb_head = trb;
427 req->trb_count = 1;
428 req->chain = 0;
429 } else {
430 trb_num = length / MV_U3D_EP_MAX_LENGTH_TRANSFER;
431 if (length % MV_U3D_EP_MAX_LENGTH_TRANSFER)
432 trb_num++;
433
434 trb = kcalloc(trb_num, sizeof(*trb), GFP_ATOMIC);
435 if (!trb)
436 return -ENOMEM;
437
438 trb_hw = kcalloc(trb_num, sizeof(*trb_hw), GFP_ATOMIC);
439 if (!trb_hw) {
440 kfree(trb);
441 return -ENOMEM;
442 }
443
444 do {
445 trb->trb_hw = trb_hw;
446 if (mv_u3d_build_trb_chain(req, &count,
447 trb, &is_last)) {
448 dev_err(u3d->dev,
449 "%s, mv_u3d_build_trb_chain fail\n",
450 __func__);
451 return -EIO;
452 }
453
454 list_add_tail(&trb->trb_list, &req->trb_list);
455 req->trb_count++;
456 trb++;
457 trb_hw++;
458 } while (!is_last);
459
460 req->trb_head = list_entry(req->trb_list.next,
461 struct mv_u3d_trb, trb_list);
462 req->trb_head->trb_dma = dma_map_single(u3d->gadget.dev.parent,
463 req->trb_head->trb_hw,
464 trb_num * sizeof(*trb_hw),
465 DMA_BIDIRECTIONAL);
466
467 req->chain = 1;
468 }
469
470 return 0;
471 }
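For requests larger than one TRB's worth of data, the code above allocates a contiguous array of ceil(length / MV_U3D_EP_MAX_LENGTH_TRANSFER) TRBs. A standalone sketch of that chunk count, assuming the 65536-byte limit that appears in the trace:

/* Sketch of the TRB count computed by mv_u3d_req_to_trb(). */
#include <stdio.h>

#define MAX_LENGTH_TRANSFER 65536u	/* assumed value of the driver constant */

static unsigned int trbs_needed(unsigned int length)
{
	unsigned int n;

	if (length <= MAX_LENGTH_TRANSFER)
		return 1;		/* single-TRB fast path */
	n = length / MAX_LENGTH_TRANSFER;
	if (length % MAX_LENGTH_TRANSFER)
		n++;			/* partial final chunk */
	return n;
}

int main(void)
{
	printf("16 KiB  -> %u TRB(s)\n", trbs_needed(16 * 1024));
	printf("64 KiB  -> %u TRB(s)\n", trbs_needed(64 * 1024));
	printf("100 KiB -> %u TRB(s)\n", trbs_needed(100 * 1024));
	return 0;
}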
472
473 static int
474 mv_u3d_start_queue(struct mv_u3d_ep *ep)
475 {
476 struct mv_u3d *u3d = ep->u3d;
477 struct mv_u3d_req *req;
478 int ret;
479
480 if (!list_empty(&ep->req_list) && !ep->processing)
481 req = list_entry(ep->req_list.next, struct mv_u3d_req, list);
482 else
483 return 0;
484
485 ep->processing = 1;
486
487 /* set up dma mapping */
488 ret = usb_gadget_map_request(&u3d->gadget, &req->req,
489 mv_u3d_ep_dir(ep));
490 if (ret)
491 return ret;
492
493 req->req.status = -EINPROGRESS;
494 req->req.actual = 0;
495 req->trb_count = 0;
496
497 /* build trbs and push them to device queue */
498 if (!mv_u3d_req_to_trb(req)) {
499 ret = mv_u3d_queue_trb(ep, req);
500 if (ret) {
501 ep->processing = 0;
502 return ret;
503 }
504 } else {
505 ep->processing = 0;
506 dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__);
507 return -ENOMEM;
508 }
509
510 /* irq handler advances the queue */
511 if (req)
512 list_add_tail(&req->queue, &ep->queue);
513
514 return 0;
515 }
516
517 static int mv_u3d_ep_enable(struct usb_ep *_ep,
518 const struct usb_endpoint_descriptor *desc)
519 {
520 struct mv_u3d *u3d;
521 struct mv_u3d_ep *ep;
522 u16 max = 0;
523 unsigned maxburst = 0;
524 u32 epxcr, direction;
525
526 if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
527 return -EINVAL;
528
529 ep = container_of(_ep, struct mv_u3d_ep, ep);
530 u3d = ep->u3d;
531
532 if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN)
533 return -ESHUTDOWN;
534
535 direction = mv_u3d_ep_dir(ep);
536 max = le16_to_cpu(desc->wMaxPacketSize);
537
538 if (!_ep->maxburst)
539 _ep->maxburst = 1;
540 maxburst = _ep->maxburst;
541
542 /* Set the max burst size */
543 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
544 case USB_ENDPOINT_XFER_BULK:
545 if (maxburst > 16) {
546 dev_dbg(u3d->dev,
547 "max burst should not be greater "
548 "than 16 on bulk ep\n");
549 maxburst = 1;
550 _ep->maxburst = maxburst;
551 }
552 dev_dbg(u3d->dev,
553 "maxburst: %d on bulk %s\n", maxburst, ep->name);
554 break;
555 case USB_ENDPOINT_XFER_CONTROL:
556 /* control transfer only supports maxburst as one */
557 maxburst = 1;
558 _ep->maxburst = maxburst;
559 break;
560 case USB_ENDPOINT_XFER_INT:
561 if (maxburst != 1) {
562 dev_dbg(u3d->dev,
563 "max burst should be 1 on int ep "
564 "if transfer size is not 1024\n");
565 maxburst = 1;
566 _ep->maxburst = maxburst;
567 }
568 break;
569 case USB_ENDPOINT_XFER_ISOC:
570 if (maxburst != 1) {
571 dev_dbg(u3d->dev,
572 "max burst should be 1 on isoc ep "
573 "if transfer size is not 1024\n");
574 maxburst = 1;
575 _ep->maxburst = maxburst;
576 }
577 break;
578 default:
579 goto en_done;
580 }
581
582 ep->ep.maxpacket = max;
583 ep->ep.desc = desc;
584 ep->enabled = 1;
585
586 /* Enable the endpoint for Rx or Tx and set the endpoint type */
587 if (direction == MV_U3D_EP_DIR_OUT) {
588 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
589 epxcr |= MV_U3D_EPXCR_EP_INIT;
590 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
591 udelay(5);
592 epxcr &= ~MV_U3D_EPXCR_EP_INIT;
593 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
594
595 epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
596 | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
597 | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
598 | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
599 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
600 } else {
601 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
602 epxcr |= MV_U3D_EPXCR_EP_INIT;
603 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
604 udelay(5);
605 epxcr &= ~MV_U3D_EPXCR_EP_INIT;
606 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
607
608 epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
609 | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
610 | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
611 | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
612 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
613 }
614
615 return 0;
616 en_done:
617 return -EINVAL;
618 }
619
620 static int mv_u3d_ep_disable(struct usb_ep *_ep)
621 {
622 struct mv_u3d *u3d;
623 struct mv_u3d_ep *ep;
624 u32 epxcr, direction;
625 unsigned long flags;
626
627 if (!_ep)
628 return -EINVAL;
629
630 ep = container_of(_ep, struct mv_u3d_ep, ep);
631 if (!ep->ep.desc)
632 return -EINVAL;
633
634 u3d = ep->u3d;
635
636 direction = mv_u3d_ep_dir(ep);
637
638 /* nuke all pending requests (does flush) */
639 spin_lock_irqsave(&u3d->lock, flags);
640 mv_u3d_nuke(ep, -ESHUTDOWN);
641 spin_unlock_irqrestore(&u3d->lock, flags);
642
643 /* Disable the endpoint for Rx or Tx and reset the endpoint type */
644 if (direction == MV_U3D_EP_DIR_OUT) {
645 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
646 epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
647 | USB_ENDPOINT_XFERTYPE_MASK);
648 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
649 } else {
650 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
651 epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
652 | USB_ENDPOINT_XFERTYPE_MASK);
653 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
654 }
655
656 ep->enabled = 0;
657
658 ep->ep.desc = NULL;
659 return 0;
660 }
661
662 static struct usb_request *
663 mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
664 {
665 struct mv_u3d_req *req = NULL;
666
667 req = kzalloc(sizeof *req, gfp_flags);
668 if (!req)
669 return NULL;
670
671 INIT_LIST_HEAD(&req->queue);
672
673 return &req->req;
674 }
675
676 static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)
677 {
678 struct mv_u3d_req *req = container_of(_req, struct mv_u3d_req, req);
679
680 kfree(req);
681 }
682
683 static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)
684 {
685 struct mv_u3d *u3d;
686 u32 direction;
687 struct mv_u3d_ep *ep = container_of(_ep, struct mv_u3d_ep, ep);
688 unsigned int loops;
689 u32 tmp;
690
691 /* if endpoint is not enabled, cannot flush endpoint */
692 if (!ep->enabled)
693 return;
694
695 u3d = ep->u3d;
696 direction = mv_u3d_ep_dir(ep);
697
698 /* ep0 needs the bit cleared after flushing the fifo. */
699 if (!ep->ep_num) {
700 if (direction == MV_U3D_EP_DIR_OUT) {
701 tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
702 tmp |= MV_U3D_EPXCR_EP_FLUSH;
703 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
704 udelay(10);
705 tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
706 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
707 } else {
708 tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
709 tmp |= MV_U3D_EPXCR_EP_FLUSH;
710 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
711 udelay(10);
712 tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
713 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
714 }
715 return;
716 }
717
718 if (direction == MV_U3D_EP_DIR_OUT) {
719 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
720 tmp |= MV_U3D_EPXCR_EP_FLUSH;
721 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
722
723 /* Wait until flushing completed */
724 loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
725 while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0) &
726 MV_U3D_EPXCR_EP_FLUSH) {
727 /*
728 * EP_FLUSH bit should be cleared to indicate this
729 * operation is complete
730 */
731 if (loops == 0) {
732 dev_dbg(u3d->dev,
733 "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
734 direction ? "in" : "out");
735 return;
736 }
737 loops--;
738 udelay(LOOPS_USEC);
739 }
740 } else { /* EP_DIR_IN */
741 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
742 tmp |= MV_U3D_EPXCR_EP_FLUSH;
743 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
744
745 /* Wait until flushing completed */
746 loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
747 while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0) &
748 MV_U3D_EPXCR_EP_FLUSH) {
749 /*
750 * EP_FLUSH bit should be cleared to indicate this
751 * operation is complete
752 */
753 if (loops == 0) {
754 dev_dbg(u3d->dev,
755 "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
756 direction ? "in" : "out");
757 return;
758 }
759 loops--;
760 udelay(LOOPS_USEC);
761 }
762 }
763 }
764
765 /* queues (submits) an I/O request to an endpoint */
766 static int
767 mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
768 {
769 struct mv_u3d_ep *ep;
770 struct mv_u3d_req *req;
771 struct mv_u3d *u3d;
772 unsigned long flags;
773 int is_first_req = 0;
774
775 if (unlikely(!_ep || !_req))
776 return -EINVAL;
777
778 ep = container_of(_ep, struct mv_u3d_ep, ep);
779 u3d = ep->u3d;
780
781 req = container_of(_req, struct mv_u3d_req, req);
782
783 if (!ep->ep_num
784 && u3d->ep0_state == MV_U3D_STATUS_STAGE
785 && !_req->length) {
786 dev_dbg(u3d->dev, "ep0 status stage\n");
787 u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
788 return 0;
789 }
790
791 dev_dbg(u3d->dev, "%s: %s, req: 0x%p\n",
792 __func__, _ep->name, req);
793
794 /* catch various bogus parameters */
795 if (!req->req.complete || !req->req.buf
796 || !list_empty(&req->queue)) {
797 dev_err(u3d->dev,
798 "%s, bad params, _req: 0x%p,"
799 "req->req.complete: 0x%p, req->req.buf: 0x%p,"
800 "list_empty: 0x%x\n",
801 __func__, _req,
802 req->req.complete, req->req.buf,
803 list_empty(&req->queue));
804 return -EINVAL;
805 }
806 if (unlikely(!ep->ep.desc)) {
807 dev_err(u3d->dev, "%s, bad ep\n", __func__);
808 return -EINVAL;
809 }
810 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
811 if (req->req.length > ep->ep.maxpacket)
812 return -EMSGSIZE;
813 }
814
815 if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN) {
816 dev_err(u3d->dev,
817 "bad params of driver/speed\n");
818 return -ESHUTDOWN;
819 }
820
821 req->ep = ep;
822
823 /* Software list handles usb request. */
824 spin_lock_irqsave(&ep->req_lock, flags);
825 is_first_req = list_empty(&ep->req_list);
826 list_add_tail(&req->list, &ep->req_list);
827 spin_unlock_irqrestore(&ep->req_lock, flags);
828 if (!is_first_req) {
829 dev_dbg(u3d->dev, "list is not empty\n");
830 return 0;
831 }
832
833 dev_dbg(u3d->dev, "call mv_u3d_start_queue from usb_ep_queue\n");
834 spin_lock_irqsave(&u3d->lock, flags);
835 mv_u3d_start_queue(ep);
836 spin_unlock_irqrestore(&u3d->lock, flags);
837 return 0;
838 }
839
840 /* dequeues (cancels, unlinks) an I/O request from an endpoint */
841 static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
842 {
843 struct mv_u3d_ep *ep;
844 struct mv_u3d_req *req;
845 struct mv_u3d *u3d;
846 struct mv_u3d_ep_context *ep_context;
847 struct mv_u3d_req *next_req;
848
849 unsigned long flags;
850 int ret = 0;
851
852 if (!_ep || !_req)
853 return -EINVAL;
854
855 ep = container_of(_ep, struct mv_u3d_ep, ep);
856 u3d = ep->u3d;
857
858 spin_lock_irqsave(&ep->u3d->lock, flags);
859
860 /* make sure it's actually queued on this endpoint */
861 list_for_each_entry(req, &ep->queue, queue) {
862 if (&req->req == _req)
863 break;
864 }
865 if (&req->req != _req) {
866 ret = -EINVAL;
867 goto out;
868 }
869
870 /* The request is in progress, or completed but not dequeued */
871 if (ep->queue.next == &req->queue) {
872 _req->status = -ECONNRESET;
873 mv_u3d_ep_fifo_flush(_ep);
874
875 /* The request isn't the last request in this ep queue */
876 if (req->queue.next != &ep->queue) {
877 dev_dbg(u3d->dev,
878 "it is the last request in this ep queue\n");
879 ep_context = ep->ep_context;
880 next_req = list_entry(req->queue.next,
881 struct mv_u3d_req, queue);
882
883 /* Point first TRB of next request to the EP context. */
884 iowrite32((unsigned long) next_req->trb_head,
885 &ep_context->trb_addr_lo);
886 } else {
887 struct mv_u3d_ep_context *ep_context;
888 ep_context = ep->ep_context;
889 ep_context->trb_addr_lo = 0;
890 ep_context->trb_addr_hi = 0;
891 }
892
893 } else
894 WARN_ON(1);
895
896 mv_u3d_done(ep, req, -ECONNRESET);
897
898 /* remove the req from the ep req list */
899 if (!list_empty(&ep->req_list)) {
900 struct mv_u3d_req *curr_req;
901 curr_req = list_entry(ep->req_list.next,
902 struct mv_u3d_req, list);
903 if (curr_req == req) {
904 list_del_init(&req->list);
905 ep->processing = 0;
906 }
907 }
908
909 out:
910 spin_unlock_irqrestore(&ep->u3d->lock, flags);
911 return ret;
912 }
913
914 static void
915 mv_u3d_ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall)
916 {
917 u32 tmp;
918 struct mv_u3d_ep *ep = u3d->eps;
919
920 dev_dbg(u3d->dev, "%s\n", __func__);
921 if (direction == MV_U3D_EP_DIR_OUT) {
922 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
923 if (stall)
924 tmp |= MV_U3D_EPXCR_EP_HALT;
925 else
926 tmp &= ~MV_U3D_EPXCR_EP_HALT;
927 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
928 } else {
929 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
930 if (stall)
931 tmp |= MV_U3D_EPXCR_EP_HALT;
932 else
933 tmp &= ~MV_U3D_EPXCR_EP_HALT;
934 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
935 }
936 }
937
938 static int mv_u3d_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
939 {
940 struct mv_u3d_ep *ep;
941 unsigned long flags = 0;
942 int status = 0;
943 struct mv_u3d *u3d;
944
945 ep = container_of(_ep, struct mv_u3d_ep, ep);
946 u3d = ep->u3d;
947 if (!ep->ep.desc) {
948 status = -EINVAL;
949 goto out;
950 }
951
952 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
953 status = -EOPNOTSUPP;
954 goto out;
955 }
956
957 /*
958 * An attempt to halt an IN ep will fail if any transfer requests
959 * are still queued.
960 */
961 if (halt && (mv_u3d_ep_dir(ep) == MV_U3D_EP_DIR_IN)
962 && !list_empty(&ep->queue)) {
963 status = -EAGAIN;
964 goto out;
965 }
966
967 spin_lock_irqsave(&ep->u3d->lock, flags);
968 mv_u3d_ep_set_stall(u3d, ep->ep_num, mv_u3d_ep_dir(ep), halt);
969 if (halt && wedge)
970 ep->wedge = 1;
971 else if (!halt)
972 ep->wedge = 0;
973 spin_unlock_irqrestore(&ep->u3d->lock, flags);
974
975 if (ep->ep_num == 0)
976 u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
977 out:
978 return status;
979 }
980
981 static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)
982 {
983 return mv_u3d_ep_set_halt_wedge(_ep, halt, 0);
984 }
985
986 static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)
987 {
988 return mv_u3d_ep_set_halt_wedge(_ep, 1, 1);
989 }
990
991 static struct usb_ep_ops mv_u3d_ep_ops = {
992 .enable = mv_u3d_ep_enable,
993 .disable = mv_u3d_ep_disable,
994
995 .alloc_request = mv_u3d_alloc_request,
996 .free_request = mv_u3d_free_request,
997
998 .queue = mv_u3d_ep_queue,
999 .dequeue = mv_u3d_ep_dequeue,
1000
1001 .set_wedge = mv_u3d_ep_set_wedge,
1002 .set_halt = mv_u3d_ep_set_halt,
1003 .fifo_flush = mv_u3d_ep_fifo_flush,
1004 };
1005
1006 static void mv_u3d_controller_stop(struct mv_u3d *u3d)
1007 {
1008 u32 tmp;
1009
1010 if (!u3d->clock_gating && u3d->vbus_valid_detect)
1011 iowrite32(MV_U3D_INTR_ENABLE_VBUS_VALID,
1012 &u3d->vuc_regs->intrenable);
1013 else
1014 iowrite32(0, &u3d->vuc_regs->intrenable);
1015 iowrite32(~0x0, &u3d->vuc_regs->endcomplete);
1016 iowrite32(~0x0, &u3d->vuc_regs->trbunderrun);
1017 iowrite32(~0x0, &u3d->vuc_regs->trbcomplete);
1018 iowrite32(~0x0, &u3d->vuc_regs->linkchange);
1019 iowrite32(0x1, &u3d->vuc_regs->setuplock);
1020
1021 /* Reset the RUN bit in the command register to stop USB */
1022 tmp = ioread32(&u3d->op_regs->usbcmd);
1023 tmp &= ~MV_U3D_CMD_RUN_STOP;
1024 iowrite32(tmp, &u3d->op_regs->usbcmd);
1025 dev_dbg(u3d->dev, "after u3d_stop, USBCMD 0x%x\n",
1026 ioread32(&u3d->op_regs->usbcmd));
1027 }
1028
1029 static void mv_u3d_controller_start(struct mv_u3d *u3d)
1030 {
1031 u32 usbintr;
1032 u32 temp;
1033
1034 /* enable link LTSSM state machine */
1035 temp = ioread32(&u3d->vuc_regs->ltssm);
1036 temp |= MV_U3D_LTSSM_PHY_INIT_DONE;
1037 iowrite32(temp, &u3d->vuc_regs->ltssm);
1038
1039 /* Enable interrupts */
1040 usbintr = MV_U3D_INTR_ENABLE_LINK_CHG | MV_U3D_INTR_ENABLE_TXDESC_ERR |
1041 MV_U3D_INTR_ENABLE_RXDESC_ERR | MV_U3D_INTR_ENABLE_TX_COMPLETE |
1042 MV_U3D_INTR_ENABLE_RX_COMPLETE | MV_U3D_INTR_ENABLE_SETUP |
1043 (u3d->vbus_valid_detect ? MV_U3D_INTR_ENABLE_VBUS_VALID : 0);
1044 iowrite32(usbintr, &u3d->vuc_regs->intrenable);
1045
1046 /* Enable ctrl ep */
1047 iowrite32(0x1, &u3d->vuc_regs->ctrlepenable);
1048
1049 /* Set the Run bit in the command register */
1050 iowrite32(MV_U3D_CMD_RUN_STOP, &u3d->op_regs->usbcmd);
1051 dev_dbg(u3d->dev, "after u3d_start, USBCMD 0x%x\n",
1052 ioread32(&u3d->op_regs->usbcmd));
1053 }
1054
1055 static int mv_u3d_controller_reset(struct mv_u3d *u3d)
1056 {
1057 unsigned int loops;
1058 u32 tmp;
1059
1060 /* Stop the controller */
1061 tmp = ioread32(&u3d->op_regs->usbcmd);
1062 tmp &= ~MV_U3D_CMD_RUN_STOP;
1063 iowrite32(tmp, &u3d->op_regs->usbcmd);
1064
1065 /* Reset the controller to get default values */
1066 iowrite32(MV_U3D_CMD_CTRL_RESET, &u3d->op_regs->usbcmd);
1067
1068 /* wait for reset to complete */
1069 loops = LOOPS(MV_U3D_RESET_TIMEOUT);
1070 while (ioread32(&u3d->op_regs->usbcmd) & MV_U3D_CMD_CTRL_RESET) {
1071 if (loops == 0) {
1072 dev_err(u3d->dev,
1073 "Wait for RESET completed TIMEOUT\n");
1074 return -ETIMEDOUT;
1075 }
1076 loops--;
1077 udelay(LOOPS_USEC);
1078 }
1079
1080 /* Configure the Endpoint Context Address */
1081 iowrite32(u3d->ep_context_dma, &u3d->op_regs->dcbaapl);
1082 iowrite32(0, &u3d->op_regs->dcbaaph);
1083
1084 return 0;
1085 }
1086
1087 static int mv_u3d_enable(struct mv_u3d *u3d)
1088 {
1089 struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
1090 int retval;
1091
1092 if (u3d->active)
1093 return 0;
1094
1095 if (!u3d->clock_gating) {
1096 u3d->active = 1;
1097 return 0;
1098 }
1099
1100 dev_dbg(u3d->dev, "enable u3d\n");
1101 clk_enable(u3d->clk);
1102 if (pdata->phy_init) {
1103 retval = pdata->phy_init(u3d->phy_regs);
1104 if (retval) {
1105 dev_err(u3d->dev,
1106 "init phy error %d\n", retval);
1107 clk_disable(u3d->clk);
1108 return retval;
1109 }
1110 }
1111 u3d->active = 1;
1112
1113 return 0;
1114 }
1115
1116 static void mv_u3d_disable(struct mv_u3d *u3d)
1117 {
1118 struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
1119 if (u3d->clock_gating && u3d->active) {
1120 dev_dbg(u3d->dev, "disable u3d\n");
1121 if (pdata->phy_deinit)
1122 pdata->phy_deinit(u3d->phy_regs);
1123 clk_disable(u3d->clk);
1124 u3d->active = 0;
1125 }
1126 }
1127
1128 static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)
1129 {
1130 struct mv_u3d *u3d;
1131 unsigned long flags;
1132 int retval = 0;
1133
1134 u3d = container_of(gadget, struct mv_u3d, gadget);
1135
1136 spin_lock_irqsave(&u3d->lock, flags);
1137
1138 u3d->vbus_active = (is_active != 0);
1139 dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
1140 __func__, u3d->softconnect, u3d->vbus_active);
1141 /*
1142 * 1. external VBUS detect: we can disable/enable clock on demand.
1143 * 2. UDC VBUS detect: we have to enable clock all the time.
1144 * 3. No VBUS detect: we have to enable clock all the time.
1145 */
1146 if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
1147 retval = mv_u3d_enable(u3d);
1148 if (retval == 0) {
1149 /*
1150 * after the clock is disabled, we lose all the register
1151 * context. We have to re-init the registers.
1152 */
1153 mv_u3d_controller_reset(u3d);
1154 mv_u3d_ep0_reset(u3d);
1155 mv_u3d_controller_start(u3d);
1156 }
1157 } else if (u3d->driver && u3d->softconnect) {
1158 if (!u3d->active)
1159 goto out;
1160
1161 /* stop all the transfers in the queue */
1162 mv_u3d_stop_activity(u3d, u3d->driver);
1163 mv_u3d_controller_stop(u3d);
1164 mv_u3d_disable(u3d);
1165 }
1166
1167 out:
1168 spin_unlock_irqrestore(&u3d->lock, flags);
1169 return retval;
1170 }
1171
1172 /* constrain controller's VBUS power usage
1173 * This call is used by gadget drivers during SET_CONFIGURATION calls,
1174 * reporting how much power the device may consume. For example, this
1175 * could affect how quickly batteries are recharged.
1176 *
1177 * Returns zero on success, else negative errno.
1178 */
1179 static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1180 {
1181 struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
1182
1183 u3d->power = mA;
1184
1185 return 0;
1186 }
1187
1188 static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)
1189 {
1190 struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
1191 unsigned long flags;
1192 int retval = 0;
1193
1194 spin_lock_irqsave(&u3d->lock, flags);
1195
1196 dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
1197 __func__, u3d->softconnect, u3d->vbus_active);
1198 u3d->softconnect = (is_on != 0);
1199 if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
1200 retval = mv_u3d_enable(u3d);
1201 if (retval == 0) {
1202 /*
1203 * after the clock is disabled, we lose all the register
1204 * context. We have to re-init the registers.
1205 */
1206 mv_u3d_controller_reset(u3d);
1207 mv_u3d_ep0_reset(u3d);
1208 mv_u3d_controller_start(u3d);
1209 }
1210 } else if (u3d->driver && u3d->vbus_active) {
1211 /* stop all the transfers in the queue */
1212 mv_u3d_stop_activity(u3d, u3d->driver);
1213 mv_u3d_controller_stop(u3d);
1214 mv_u3d_disable(u3d);
1215 }
1216
1217 spin_unlock_irqrestore(&u3d->lock, flags);
1218
1219 return retval;
1220 }
1221
1222 static int mv_u3d_start(struct usb_gadget *g,
1223 struct usb_gadget_driver *driver)
1224 {
1225 struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
1226 struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
1227 unsigned long flags;
1228
1229 if (u3d->driver)
1230 return -EBUSY;
1231
1232 spin_lock_irqsave(&u3d->lock, flags);
1233
1234 if (!u3d->clock_gating) {
1235 clk_enable(u3d->clk);
1236 if (pdata->phy_init)
1237 pdata->phy_init(u3d->phy_regs);
1238 }
1239
1240 /* hook up the driver ... */
1241 driver->driver.bus = NULL;
1242 u3d->driver = driver;
1243
1244 u3d->ep0_dir = USB_DIR_OUT;
1245
1246 spin_unlock_irqrestore(&u3d->lock, flags);
1247
1248 u3d->vbus_valid_detect = 1;
1249
1250 return 0;
1251 }
1252
1253 static int mv_u3d_stop(struct usb_gadget *g)
1254 {
1255 struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
1256 struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
1257 unsigned long flags;
1258
1259 u3d->vbus_valid_detect = 0;
1260 spin_lock_irqsave(&u3d->lock, flags);
1261
1262 /* enable clock to access controller register */
1263 clk_enable(u3d->clk);
1264 if (pdata->phy_init)
1265 pdata->phy_init(u3d->phy_regs);
1266
1267 mv_u3d_controller_stop(u3d);
1268 /* stop all usb activities */
1269 u3d->gadget.speed = USB_SPEED_UNKNOWN;
1270 mv_u3d_stop_activity(u3d, NULL);
1271 mv_u3d_disable(u3d);
1272
1273 if (pdata->phy_deinit)
1274 pdata->phy_deinit(u3d->phy_regs);
1275 clk_disable(u3d->clk);
1276
1277 spin_unlock_irqrestore(&u3d->lock, flags);
1278
1279 u3d->driver = NULL;
1280
1281 return 0;
1282 }
1283
1284 /* device controller usb_gadget_ops structure */
1285 static const struct usb_gadget_ops mv_u3d_ops = {
1286 /* notify controller that VBUS is powered or not */
1287 .vbus_session = mv_u3d_vbus_session,
1288
1289 /* constrain controller's VBUS power usage */
1290 .vbus_draw = mv_u3d_vbus_draw,
1291
1292 .pullup = mv_u3d_pullup,
1293 .udc_start = mv_u3d_start,
1294 .udc_stop = mv_u3d_stop,
1295 };
1296
1297 static int mv_u3d_eps_init(struct mv_u3d *u3d)
1298 {
1299 struct mv_u3d_ep *ep;
1300 char name[14];
1301 int i;
1302
1303 /* initialize ep0, ep0 in/out use eps[1] */
1304 ep = &u3d->eps[1];
1305 ep->u3d = u3d;
1306 strncpy(ep->name, "ep0", sizeof(ep->name));
1307 ep->ep.name = ep->name;
1308 ep->ep.ops = &mv_u3d_ep_ops;
1309 ep->wedge = 0;
1310 usb_ep_set_maxpacket_limit(&ep->ep, MV_U3D_EP0_MAX_PKT_SIZE);
1311 ep->ep.caps.type_control = true;
1312 ep->ep.caps.dir_in = true;
1313 ep->ep.caps.dir_out = true;
1314 ep->ep_num = 0;
1315 ep->ep.desc = &mv_u3d_ep0_desc;
1316 INIT_LIST_HEAD(&ep->queue);
1317 INIT_LIST_HEAD(&ep->req_list);
1318 ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1319
1320 /* add ep0 ep_context */
1321 ep->ep_context = &u3d->ep_context[1];
1322
1323 /* initialize other endpoints */
1324 for (i = 2; i < u3d->max_eps * 2; i++) {
1325 ep = &u3d->eps[i];
1326 if (i & 1) {
1327 snprintf(name, sizeof(name), "ep%din", i >> 1);
1328 ep->direction = MV_U3D_EP_DIR_IN;
1329 ep->ep.caps.dir_in = true;
1330 } else {
1331 snprintf(name, sizeof(name), "ep%dout", i >> 1);
1332 ep->direction = MV_U3D_EP_DIR_OUT;
1333 ep->ep.caps.dir_out = true;
1334 }
1335 ep->u3d = u3d;
1336 strncpy(ep->name, name, sizeof(ep->name));
1337 ep->ep.name = ep->name;
1338
1339 ep->ep.caps.type_iso = true;
1340 ep->ep.caps.type_bulk = true;
1341 ep->ep.caps.type_int = true;
1342
1343 ep->ep.ops = &mv_u3d_ep_ops;
1344 usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
1345 ep->ep_num = i / 2;
1346
1347 INIT_LIST_HEAD(&ep->queue);
1348 list_add_tail(&ep->ep.ep_list, &u3d->gadget.ep_list);
1349
1350 INIT_LIST_HEAD(&ep->req_list);
1351 spin_lock_init(&ep->req_lock);
1352 ep->ep_context = &u3d->ep_context[i];
1353 }
1354
1355 return 0;
1356 }
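The eps[] array built above encodes both the endpoint number and its direction in the index: odd indexes are IN, even are OUT, and index 1 is reserved for the shared ep0. A standalone sketch of the naming scheme; max_eps is an arbitrary example value:

/* Sketch of the endpoint index/name layout used by mv_u3d_eps_init(). */
#include <stdio.h>

int main(void)
{
	unsigned int i, max_eps = 4;
	char name[16];

	for (i = 2; i < max_eps * 2; i++) {
		if (i & 1)
			snprintf(name, sizeof(name), "ep%uin", i >> 1);
		else
			snprintf(name, sizeof(name), "ep%uout", i >> 1);
		printf("eps[%u] = %s\n", i, name);
	}
	return 0;
}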
1357
1358 /* delete all endpoint requests, called with spinlock held */
1359 static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status)
1360 {
1361 /* endpoint fifo flush */
1362 mv_u3d_ep_fifo_flush(&ep->ep);
1363
1364 while (!list_empty(&ep->queue)) {
1365 struct mv_u3d_req *req = NULL;
1366 req = list_entry(ep->queue.next, struct mv_u3d_req, queue);
1367 mv_u3d_done(ep, req, status);
1368 }
1369 }
1370
1371 /* stop all USB activities */
1372 static
1373 void mv_u3d_stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver)
1374 {
1375 struct mv_u3d_ep *ep;
1376
1377 mv_u3d_nuke(&u3d->eps[1], -ESHUTDOWN);
1378
1379 list_for_each_entry(ep, &u3d->gadget.ep_list, ep.ep_list) {
1380 mv_u3d_nuke(ep, -ESHUTDOWN);
1381 }
1382
1383 /* report disconnect; the driver is already quiesced */
1384 if (driver) {
1385 spin_unlock(&u3d->lock);
1386 driver->disconnect(&u3d->gadget);
1387 spin_lock(&u3d->lock);
1388 }
1389 }
1390
1391 static void mv_u3d_irq_process_error(struct mv_u3d *u3d)
1392 {
1393 /* Increment the error count */
1394 u3d->errors++;
1395 dev_err(u3d->dev, "%s\n", __func__);
1396 }
1397
1398 static void mv_u3d_irq_process_link_change(struct mv_u3d *u3d)
1399 {
1400 u32 linkchange;
1401
1402 linkchange = ioread32(&u3d->vuc_regs->linkchange);
1403 iowrite32(linkchange, &u3d->vuc_regs->linkchange);
1404
1405 dev_dbg(u3d->dev, "linkchange: 0x%x\n", linkchange);
1406
1407 if (linkchange & MV_U3D_LINK_CHANGE_LINK_UP) {
1408 dev_dbg(u3d->dev, "link up: ltssm state: 0x%x\n",
1409 ioread32(&u3d->vuc_regs->ltssmstate));
1410
1411 u3d->usb_state = USB_STATE_DEFAULT;
1412 u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
1413 u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
1414
1415 /* set speed */
1416 u3d->gadget.speed = USB_SPEED_SUPER;
1417 }
1418
1419 if (linkchange & MV_U3D_LINK_CHANGE_SUSPEND) {
1420 dev_dbg(u3d->dev, "link suspend\n");
1421 u3d->resume_state = u3d->usb_state;
1422 u3d->usb_state = USB_STATE_SUSPENDED;
1423 }
1424
1425 if (linkchange & MV_U3D_LINK_CHANGE_RESUME) {
1426 dev_dbg(u3d->dev, "link resume\n");
1427 u3d->usb_state = u3d->resume_state;
1428 u3d->resume_state = 0;
1429 }
1430
1431 if (linkchange & MV_U3D_LINK_CHANGE_WRESET) {
1432 dev_dbg(u3d->dev, "warm reset\n");
1433 u3d->usb_state = USB_STATE_POWERED;
1434 }
1435
1436 if (linkchange & MV_U3D_LINK_CHANGE_HRESET) {
1437 dev_dbg(u3d->dev, "hot reset\n");
1438 u3d->usb_state = USB_STATE_DEFAULT;
1439 }
1440
1441 if (linkchange & MV_U3D_LINK_CHANGE_INACT)
1442 dev_dbg(u3d->dev, "inactive\n");
1443
1444 if (linkchange & MV_U3D_LINK_CHANGE_DISABLE_AFTER_U0)
1445 dev_dbg(u3d->dev, "ss.disabled\n");
1446
1447 if (linkchange & MV_U3D_LINK_CHANGE_VBUS_INVALID) {
1448 dev_dbg(u3d->dev, "vbus invalid\n");
1449 u3d->usb_state = USB_STATE_ATTACHED;
1450 u3d->vbus_valid_detect = 1;
1451 /* if external vbus detect is not supported,
1452 * we handle it here.
1453 */
1454 if (!u3d->vbus) {
1455 spin_unlock(&u3d->lock);
1456 mv_u3d_vbus_session(&u3d->gadget, 0);
1457 spin_lock(&u3d->lock);
1458 }
1459 }
1460 }
1461
1462 static void mv_u3d_ch9setaddress(struct mv_u3d *u3d,
1463 struct usb_ctrlrequest *setup)
1464 {
1465 u32 tmp;
1466
1467 if (u3d->usb_state != USB_STATE_DEFAULT) {
1468 dev_err(u3d->dev,
1469 "%s, cannot setaddr in this state (%d)\n",
1470 __func__, u3d->usb_state);
1471 goto err;
1472 }
1473
1474 u3d->dev_addr = (u8)setup->wValue;
1475
1476 dev_dbg(u3d->dev, "%s: 0x%x\n", __func__, u3d->dev_addr);
1477
1478 if (u3d->dev_addr > 127) {
1479 dev_err(u3d->dev,
1480 "%s, u3d address is wrong (out of range)\n", __func__);
1481 u3d->dev_addr = 0;
1482 goto err;
1483 }
1484
1485 /* update usb state */
1486 u3d->usb_state = USB_STATE_ADDRESS;
1487
1488 /* set the new address */
1489 tmp = ioread32(&u3d->vuc_regs->devaddrtiebrkr);
1490 tmp &= ~0x7F;
1491 tmp |= (u32)u3d->dev_addr;
1492 iowrite32(tmp, &u3d->vuc_regs->devaddrtiebrkr);
1493
1494 return;
1495 err:
1496 mv_u3d_ep0_stall(u3d);
1497 }
1498
1499 static int mv_u3d_is_set_configuration(struct usb_ctrlrequest *setup)
1500 {
1501 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
1502 if (setup->bRequest == USB_REQ_SET_CONFIGURATION)
1503 return 1;
1504
1505 return 0;
1506 }
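The check above matches a standard SET_CONFIGURATION request. A standalone sketch using the usual ch9 encoding (type mask 0x60, standard type 0, request number 9):

/* Sketch: recognise SET_CONFIGURATION in a raw setup packet. */
#include <stdio.h>
#include <stdint.h>

#define USB_TYPE_MASK             0x60
#define USB_TYPE_STANDARD         0x00
#define USB_REQ_SET_CONFIGURATION 0x09

struct setup_pkt {
	uint8_t  bRequestType;
	uint8_t  bRequest;
	uint16_t wValue, wIndex, wLength;
};

static int is_set_configuration(const struct setup_pkt *s)
{
	return (s->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
	       s->bRequest == USB_REQ_SET_CONFIGURATION;
}

int main(void)
{
	struct setup_pkt set_cfg  = { 0x00, 0x09, 1, 0, 0 };		/* SET_CONFIGURATION(1) */
	struct setup_pkt get_desc = { 0x80, 0x06, 0x0100, 0, 18 };	/* GET_DESCRIPTOR(device) */

	printf("%d %d\n", is_set_configuration(&set_cfg),
	       is_set_configuration(&get_desc));	/* prints: 1 0 */
	return 0;
}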
1507
1508 static void mv_u3d_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num,
1509 struct usb_ctrlrequest *setup)
1510 __releases(&u3c->lock)
1511 __acquires(&u3c->lock)
1512 {
1513 bool delegate = false;
1514
1515 mv_u3d_nuke(&u3d->eps[ep_num * 2 + MV_U3D_EP_DIR_IN], -ESHUTDOWN);
1516
1517 dev_dbg(u3d->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1518 setup->bRequestType, setup->bRequest,
1519 setup->wValue, setup->wIndex, setup->wLength);
1520
1521 /* We process some standard setup requests here */
1522 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1523 switch (setup->bRequest) {
1524 case USB_REQ_GET_STATUS:
1525 delegate = true;
1526 break;
1527
1528 case USB_REQ_SET_ADDRESS:
1529 mv_u3d_ch9setaddress(u3d, setup);
1530 break;
1531
1532 case USB_REQ_CLEAR_FEATURE:
1533 delegate = true;
1534 break;
1535
1536 case USB_REQ_SET_FEATURE:
1537 delegate = true;
1538 break;
1539
1540 default:
1541 delegate = true;
1542 }
1543 } else
1544 delegate = true;
1545
1546 /* delegate USB standard requests to the gadget driver */
1547 if (delegate == true) {
1548 /* USB requests handled by gadget */
1549 if (setup->wLength) {
1550 /* DATA phase from gadget, STATUS phase from u3d */
1551 u3d->ep0_dir = (setup->bRequestType & USB_DIR_IN)
1552 ? MV_U3D_EP_DIR_IN : MV_U3D_EP_DIR_OUT;
1553 spin_unlock(&u3d->lock);
1554 if (u3d->driver->setup(&u3d->gadget,
1555 &u3d->local_setup_buff) < 0) {
1556 dev_err(u3d->dev, "setup error!\n");
1557 mv_u3d_ep0_stall(u3d);
1558 }
1559 spin_lock(&u3d->lock);
1560 } else {
1561 /* no DATA phase, STATUS phase from gadget */
1562 u3d->ep0_dir = MV_U3D_EP_DIR_IN;
1563 u3d->ep0_state = MV_U3D_STATUS_STAGE;
1564 spin_unlock(&u3d->lock);
1565 if (u3d->driver->setup(&u3d->gadget,
1566 &u3d->local_setup_buff) < 0)
1567 mv_u3d_ep0_stall(u3d);
1568 spin_lock(&u3d->lock);
1569 }
1570
1571 if (mv_u3d_is_set_configuration(setup)) {
1572 dev_dbg(u3d->dev, "u3d configured\n");
1573 u3d->usb_state = USB_STATE_CONFIGURED;
1574 }
1575 }
1576 }
1577
1578 static void mv_u3d_get_setup_data(struct mv_u3d *u3d, u8 ep_num, u8 *buffer_ptr)
1579 {
1580 struct mv_u3d_ep_context *epcontext;
1581
1582 epcontext = &u3d->ep_context[ep_num * 2 + MV_U3D_EP_DIR_IN];
1583
1584 /* Copy the setup packet to local buffer */
1585 memcpy(buffer_ptr, (u8 *) &epcontext->setup_buffer, 8);
1586 }
1587
1588 static void mv_u3d_irq_process_setup(struct mv_u3d *u3d)
1589 {
1590 u32 tmp, i;
1591 /* Process all Setup packet received interrupts */
1592 tmp = ioread32(&u3d->vuc_regs->setuplock);
1593 if (tmp) {
1594 for (i = 0; i < u3d->max_eps; i++) {
1595 if (tmp & (1 << i)) {
1596 mv_u3d_get_setup_data(u3d, i,
1597 (u8 *)(&u3d->local_setup_buff));
1598 mv_u3d_handle_setup_packet(u3d, i,
1599 &u3d->local_setup_buff);
1600 }
1601 }
1602 }
1603
1604 iowrite32(tmp, &u3d->vuc_regs->setuplock);
1605 }
1606
1607 static void mv_u3d_irq_process_tr_complete(struct mv_u3d *u3d)
1608 {
1609 u32 tmp, bit_pos;
1610 int i, ep_num = 0, direction = 0;
1611 struct mv_u3d_ep *curr_ep;
1612 struct mv_u3d_req *curr_req, *temp_req;
1613 int status;
1614
1615 tmp = ioread32(&u3d->vuc_regs->endcomplete);
1616
1617 dev_dbg(u3d->dev, "tr_complete: ep: 0x%x\n", tmp);
1618 if (!tmp)
1619 return;
1620 iowrite32(tmp, &u3d->vuc_regs->endcomplete);
1621
1622 for (i = 0; i < u3d->max_eps * 2; i++) {
1623 ep_num = i >> 1;
1624 direction = i % 2;
1625
1626 bit_pos = 1 << (ep_num + 16 * direction);
1627
1628 if (!(bit_pos & tmp))
1629 continue;
1630
1631 if (i == 0)
1632 curr_ep = &u3d->eps[1];
1633 else
1634 curr_ep = &u3d->eps[i];
1635
1636 /* remove req out of ep request list after completion */
1637 dev_dbg(u3d->dev, "tr comp: check req_list\n");
1638 spin_lock(&curr_ep->req_lock);
1639 if (!list_empty(&curr_ep->req_list)) {
1640 struct mv_u3d_req *req;
1641 req = list_entry(curr_ep->req_list.next,
1642 struct mv_u3d_req, list);
1643 list_del_init(&req->list);
1644 curr_ep->processing = 0;
1645 }
1646 spin_unlock(&curr_ep->req_lock);
1647
1648 /* process the req queue until the first incomplete request */
1649 list_for_each_entry_safe(curr_req, temp_req,
1650 &curr_ep->queue, queue) {
1651 status = mv_u3d_process_ep_req(u3d, i, curr_req);
1652 if (status)
1653 break;
1654 /* write back status to req */
1655 curr_req->req.status = status;
1656
1657 /* ep0 request completion */
1658 if (ep_num == 0) {
1659 mv_u3d_done(curr_ep, curr_req, 0);
1660 break;
1661 } else {
1662 mv_u3d_done(curr_ep, curr_req, status);
1663 }
1664 }
1665
1666 dev_dbg(u3d->dev, "call mv_u3d_start_queue from ep complete\n");
1667 mv_u3d_start_queue(curr_ep);
1668 }
1669 }
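/*
 * In the completion loop above the endcomplete value is indexed as
 * bit_pos = 1 << (ep_num + 16 * direction), i.e. direction 0 maps to
 * bits 0..15 and direction 1 to bits 16..31.  Worked examples for the
 * loop index i (illustrative values only):
 *
 *     i = 5  ->  ep_num = 2, direction = 1  ->  bit_pos = 1 << 18
 *     i = 6  ->  ep_num = 3, direction = 0  ->  bit_pos = 1 << 3
 */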
1670
1671 static irqreturn_t mv_u3d_irq(int irq, void *dev)
1672 {
1673 struct mv_u3d *u3d = (struct mv_u3d *)dev;
1674 u32 status, intr;
1675 u32 bridgesetting;
1676 u32 trbunderrun;
1677
1678 spin_lock(&u3d->lock);
1679
1680 status = ioread32(&u3d->vuc_regs->intrcause);
1681 intr = ioread32(&u3d->vuc_regs->intrenable);
1682 status &= intr;
1683
1684 if (status == 0) {
1685 spin_unlock(&u3d->lock);
1686 dev_err(u3d->dev, "irq error!\n");
1687 return IRQ_NONE;
1688 }
1689
1690 if (status & MV_U3D_USBINT_VBUS_VALID) {
1691 bridgesetting = ioread32(&u3d->vuc_regs->bridgesetting);
1692 if (bridgesetting & MV_U3D_BRIDGE_SETTING_VBUS_VALID) {
1693 /* write vbus valid bit of bridge setting to clear */
1694 bridgesetting = MV_U3D_BRIDGE_SETTING_VBUS_VALID;
1695 iowrite32(bridgesetting, &u3d->vuc_regs->bridgesetting);
1696 dev_dbg(u3d->dev, "vbus valid\n");
1697
1698 u3d->usb_state = USB_STATE_POWERED;
1699 u3d->vbus_valid_detect = 0;
1700 /* if external vbus detection is not supported,
1701 * handle it here.
1702 */
1703 if (!u3d->vbus) {
1704 spin_unlock(&u3d->lock);
1705 mv_u3d_vbus_session(&u3d->gadget, 1);
1706 spin_lock(&u3d->lock);
1707 }
1708 } else
1709 dev_err(u3d->dev, "vbus bit is not set\n");
1710 }
1711
1712 /* RX data is already in the 16KB FIFO.*/
1713 if (status & MV_U3D_USBINT_UNDER_RUN) {
1714 trbunderrun = ioread32(&u3d->vuc_regs->trbunderrun);
1715 dev_err(u3d->dev, "under run, ep%d\n", trbunderrun);
1716 iowrite32(trbunderrun, &u3d->vuc_regs->trbunderrun);
1717 mv_u3d_irq_process_error(u3d);
1718 }
1719
1720 if (status & (MV_U3D_USBINT_RXDESC_ERR | MV_U3D_USBINT_TXDESC_ERR)) {
1721 /* write one to clear */
1722 iowrite32(status & (MV_U3D_USBINT_RXDESC_ERR
1723 | MV_U3D_USBINT_TXDESC_ERR),
1724 &u3d->vuc_regs->intrcause);
1725 dev_err(u3d->dev, "desc err 0x%x\n", status);
1726 mv_u3d_irq_process_error(u3d);
1727 }
1728
1729 if (status & MV_U3D_USBINT_LINK_CHG)
1730 mv_u3d_irq_process_link_change(u3d);
1731
1732 if (status & MV_U3D_USBINT_TX_COMPLETE)
1733 mv_u3d_irq_process_tr_complete(u3d);
1734
1735 if (status & MV_U3D_USBINT_RX_COMPLETE)
1736 mv_u3d_irq_process_tr_complete(u3d);
1737
1738 if (status & MV_U3D_USBINT_SETUP)
1739 mv_u3d_irq_process_setup(u3d);
1740
1741 spin_unlock(&u3d->lock);
1742 return IRQ_HANDLED;
1743 }
1744
1745 static int mv_u3d_remove(struct platform_device *dev)
1746 {
1747 struct mv_u3d *u3d = platform_get_drvdata(dev);
1748
1749 BUG_ON(u3d == NULL);
1750
1751 usb_del_gadget_udc(&u3d->gadget);
1752
1753 /* free memory allocated in probe */
1754 dma_pool_destroy(u3d->trb_pool);
1755
1756 if (u3d->ep_context)
1757 dma_free_coherent(&dev->dev, u3d->ep_context_size,
1758 u3d->ep_context, u3d->ep_context_dma);
1759
1760 kfree(u3d->eps);
1761
1762 if (u3d->irq)
1763 free_irq(u3d->irq, u3d);
1764
1765 if (u3d->cap_regs)
1766 iounmap(u3d->cap_regs);
1767 u3d->cap_regs = NULL;
1768
1769 kfree(u3d->status_req);
1770
1771 clk_put(u3d->clk);
1772
1773 kfree(u3d);
1774
1775 return 0;
1776 }
1777
1778 static int mv_u3d_probe(struct platform_device *dev)
1779 {
1780 struct mv_u3d *u3d = NULL;
1781 struct mv_usb_platform_data *pdata = dev_get_platdata(&dev->dev);
1782 int retval = 0;
1783 struct resource *r;
1784 size_t size;
1785
1786 if (!dev_get_platdata(&dev->dev)) {
1787 dev_err(&dev->dev, "missing platform_data\n");
1788 retval = -ENODEV;
1789 goto err_pdata;
1790 }
1791
1792 u3d = kzalloc(sizeof(*u3d), GFP_KERNEL);
1793 if (!u3d) {
1794 retval = -ENOMEM;
1795 goto err_alloc_private;
1796 }
1797
1798 spin_lock_init(&u3d->lock);
1799
1800 platform_set_drvdata(dev, u3d);
1801
1802 u3d->dev = &dev->dev;
1803 u3d->vbus = pdata->vbus;
1804
1805 u3d->clk = clk_get(&dev->dev, NULL);
1806 if (IS_ERR(u3d->clk)) {
1807 retval = PTR_ERR(u3d->clk);
1808 goto err_get_clk;
1809 }
1810
1811 r = platform_get_resource_byname(dev, IORESOURCE_MEM, "capregs");
1812 if (!r) {
1813 dev_err(&dev->dev, "no I/O memory resource defined\n");
1814 retval = -ENODEV;
1815 goto err_get_cap_regs;
1816 }
1817
1818 u3d->cap_regs = (struct mv_u3d_cap_regs __iomem *)
1819 ioremap(r->start, resource_size(r));
1820 if (!u3d->cap_regs) {
1821 dev_err(&dev->dev, "failed to map I/O memory\n");
1822 retval = -EBUSY;
1823 goto err_map_cap_regs;
1824 } else {
1825 dev_dbg(&dev->dev, "cap_regs address: 0x%lx/0x%lx\n",
1826 (unsigned long) r->start,
1827 (unsigned long) u3d->cap_regs);
1828 }
1829
1830 /* we will access controller register, so enable the u3d controller */
1831 clk_enable(u3d->clk);
1832
1833 if (pdata->phy_init) {
1834 retval = pdata->phy_init(u3d->phy_regs);
1835 if (retval) {
1836 dev_err(&dev->dev, "init phy error %d\n", retval);
1837 goto err_u3d_enable;
1838 }
1839 }
1840
1841 u3d->op_regs = (struct mv_u3d_op_regs __iomem *)(u3d->cap_regs
1842 + MV_U3D_USB3_OP_REGS_OFFSET);
1843
1844 u3d->vuc_regs = (struct mv_u3d_vuc_regs __iomem *)(u3d->cap_regs
1845 + ioread32(&u3d->cap_regs->vuoff));
1846
1847 u3d->max_eps = 16;
1848
1849 /*
1850 * Some platforms use USB to download the boot image and may not
1851 * disconnect the USB gadget before loading the kernel, so stop u3d first.
1852 */
1853 mv_u3d_controller_stop(u3d);
1854 iowrite32(0xFFFFFFFF, &u3d->vuc_regs->intrcause);
1855
1856 if (pdata->phy_deinit)
1857 pdata->phy_deinit(u3d->phy_regs);
1858 clk_disable(u3d->clk);
1859
1860 size = u3d->max_eps * sizeof(struct mv_u3d_ep_context) * 2;
1861 size = (size + MV_U3D_EP_CONTEXT_ALIGNMENT - 1)
1862 & ~(MV_U3D_EP_CONTEXT_ALIGNMENT - 1);
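/*
 * The two lines above are the usual power-of-two round-up,
 * size = (size + A - 1) & ~(A - 1).  With purely illustrative numbers, a
 * raw context array of 1000 bytes and an alignment A of 64 would be padded
 * to 1024 bytes; the real figures depend on
 * sizeof(struct mv_u3d_ep_context) and the alignment constant in mv_u3d.h.
 */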
1863 u3d->ep_context = dma_alloc_coherent(&dev->dev, size,
1864 &u3d->ep_context_dma, GFP_KERNEL);
1865 if (!u3d->ep_context) {
1866 dev_err(&dev->dev, "allocate ep context memory failed\n");
1867 retval = -ENOMEM;
1868 goto err_alloc_ep_context;
1869 }
1870 u3d->ep_context_size = size;
1871
1872 /* create TRB dma_pool resource */
1873 u3d->trb_pool = dma_pool_create("u3d_trb",
1874 &dev->dev,
1875 sizeof(struct mv_u3d_trb_hw),
1876 MV_U3D_TRB_ALIGNMENT,
1877 MV_U3D_DMA_BOUNDARY);
1878
1879 if (!u3d->trb_pool) {
1880 retval = -ENOMEM;
1881 goto err_alloc_trb_pool;
1882 }
1883
1884 size = u3d->max_eps * sizeof(struct mv_u3d_ep) * 2;
1885 u3d->eps = kzalloc(size, GFP_KERNEL);
1886 if (!u3d->eps) {
1887 retval = -ENOMEM;
1888 goto err_alloc_eps;
1889 }
1890
1891 /* initialize ep0 status request structure */
1892 u3d->status_req = kzalloc(sizeof(struct mv_u3d_req) + 8, GFP_KERNEL);
1893 if (!u3d->status_req) {
1894 retval = -ENOMEM;
1895 goto err_alloc_status_req;
1896 }
1897 INIT_LIST_HEAD(&u3d->status_req->queue);
1898
1899 /* allocate a small amount of memory to get a valid address */
1900 u3d->status_req->req.buf = (char *)u3d->status_req
1901 + sizeof(struct mv_u3d_req);
1902 u3d->status_req->req.dma = virt_to_phys(u3d->status_req->req.buf);
1903
1904 u3d->resume_state = USB_STATE_NOTATTACHED;
1905 u3d->usb_state = USB_STATE_ATTACHED;
1906 u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
1907 u3d->remote_wakeup = 0;
1908
1909 r = platform_get_resource(dev, IORESOURCE_IRQ, 0);
1910 if (!r) {
1911 dev_err(&dev->dev, "no IRQ resource defined\n");
1912 retval = -ENODEV;
1913 goto err_get_irq;
1914 }
1915 u3d->irq = r->start;
1916 if (request_irq(u3d->irq, mv_u3d_irq,
1917 IRQF_SHARED, driver_name, u3d)) {
1918 dev_err(&dev->dev, "Request irq %d for u3d failed\n",
1919 u3d->irq);
1920 u3d->irq = 0;
1921 retval = -ENODEV;
1922 goto err_request_irq;
1923 }
1924
1925 /* initialize gadget structure */
1926 u3d->gadget.ops = &mv_u3d_ops; /* usb_gadget_ops */
1927 u3d->gadget.ep0 = &u3d->eps[1].ep; /* gadget ep0 */
1928 INIT_LIST_HEAD(&u3d->gadget.ep_list); /* ep_list */
1929 u3d->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
1930
1931 /* the "gadget" abstracts/virtualizes the controller */
1932 u3d->gadget.name = driver_name; /* gadget name */
1933
1934 mv_u3d_eps_init(u3d);
1935
1936 /* external vbus detection */
1937 if (u3d->vbus) {
1938 u3d->clock_gating = 1;
1939 dev_err(&dev->dev, "external vbus detection\n");
1940 }
1941
1942 if (!u3d->clock_gating)
1943 u3d->vbus_active = 1;
1944
1945 /* enable usb3 controller vbus detection */
1946 u3d->vbus_valid_detect = 1;
1947
1948 retval = usb_add_gadget_udc(&dev->dev, &u3d->gadget);
1949 if (retval)
1950 goto err_unregister;
1951
1952 dev_dbg(&dev->dev, "successfully probed usb3 device %s clock gating\n",
1953 u3d->clock_gating ? "with" : "without");
1954
1955 return 0;
1956
1957 err_unregister:
1958 free_irq(u3d->irq, u3d);
1959 err_request_irq:
1960 err_get_irq:
1961 kfree(u3d->status_req);
1962 err_alloc_status_req:
1963 kfree(u3d->eps);
1964 err_alloc_eps:
1965 dma_pool_destroy(u3d->trb_pool);
1966 err_alloc_trb_pool:
1967 dma_free_coherent(&dev->dev, u3d->ep_context_size,
1968 u3d->ep_context, u3d->ep_context_dma);
1969 err_alloc_ep_context:
1970 if (pdata->phy_deinit)
1971 pdata->phy_deinit(u3d->phy_regs);
1972 clk_disable(u3d->clk);
1973 err_u3d_enable:
1974 iounmap(u3d->cap_regs);
1975 err_map_cap_regs:
1976 err_get_cap_regs:
1977 err_get_clk:
1978 clk_put(u3d->clk);
1979 kfree(u3d);
1980 err_alloc_private:
1981 err_pdata:
1982 return retval;
1983 }
1984
1985 #ifdef CONFIG_PM_SLEEP
1986 static int mv_u3d_suspend(struct device *dev)
1987 {
1988 struct mv_u3d *u3d = dev_get_drvdata(dev);
1989
1990 /*
1991 * USB can only suspend once the cable is unplugged.
1992 * The clock_gating == 1 case needs no handling here; it is covered
1993 * by the vbus session.
1994 */
1995 if (!u3d->clock_gating) {
1996 mv_u3d_controller_stop(u3d);
1997
1998 spin_lock_irq(&u3d->lock);
1999 /* stop all usb activities */
2000 mv_u3d_stop_activity(u3d, u3d->driver);
2001 spin_unlock_irq(&u3d->lock);
2002
2003 mv_u3d_disable(u3d);
2004 }
2005
2006 return 0;
2007 }
2008
2009 static int mv_u3d_resume(struct device *dev)
2010 {
2011 struct mv_u3d *u3d = dev_get_drvdata(dev);
2012 int retval;
2013
2014 if (!u3d->clock_gating) {
2015 retval = mv_u3d_enable(u3d);
2016 if (retval)
2017 return retval;
2018
2019 if (u3d->driver && u3d->softconnect) {
2020 mv_u3d_controller_reset(u3d);
2021 mv_u3d_ep0_reset(u3d);
2022 mv_u3d_controller_start(u3d);
2023 }
2024 }
2025
2026 return 0;
2027 }
2028 #endif
2029
2030 static SIMPLE_DEV_PM_OPS(mv_u3d_pm_ops, mv_u3d_suspend, mv_u3d_resume);
2031
2032 static void mv_u3d_shutdown(struct platform_device *dev)
2033 {
2034 struct mv_u3d *u3d = platform_get_drvdata(dev);
2035 u32 tmp;
2036
2037 tmp = ioread32(&u3d->op_regs->usbcmd);
2038 tmp &= ~MV_U3D_CMD_RUN_STOP;
2039 iowrite32(tmp, &u3d->op_regs->usbcmd);
2040 }
2041
2042 static struct platform_driver mv_u3d_driver = {
2043 .probe = mv_u3d_probe,
2044 .remove = mv_u3d_remove,
2045 .shutdown = mv_u3d_shutdown,
2046 .driver = {
2047 .name = "mv-u3d",
2048 .pm = &mv_u3d_pm_ops,
2049 },
2050 };
2051
2052 module_platform_driver(mv_u3d_driver);
2053 MODULE_ALIAS("platform:mv-u3d");
2054 MODULE_DESCRIPTION(DRIVER_DESC);
2055 MODULE_AUTHOR("Yu Xu <yuxu@marvell.com>");
2056 MODULE_LICENSE("GPL");
2057
2058
2059
2060
2061
2062 /* LDV_COMMENT_BEGIN_MAIN */
2063 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
2064
2065 /*###########################################################################*/
2066
2067 /*############## Driver Environment Generator 0.2 output ####################*/
2068
2069 /*###########################################################################*/
2070
2071
2072
2073 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
2074 void ldv_check_final_state(void);
2075
2076 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
2077 void ldv_check_return_value(int res);
2078
2079 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
2080 void ldv_check_return_value_probe(int res);
2081
2082 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
2083 void ldv_initialize(void);
2084
2085 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
2086 void ldv_handler_precall(void);
2087
2088 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary integer value. */
2089 int nondet_int(void);
2090
2091 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
2092 int LDV_IN_INTERRUPT;
2093
2094 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
2095 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
2096
2097
2098
2099 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
2100 /*============================= VARIABLE DECLARATION PART =============================*/
2101 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2102 /* content: static int mv_u3d_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)*/
2103 /* LDV_COMMENT_BEGIN_PREP */
2104 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2105 /* LDV_COMMENT_END_PREP */
2106 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_enable" */
2107 struct usb_ep * var_group1;
2108 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_enable" */
2109 const struct usb_endpoint_descriptor * var_mv_u3d_ep_enable_8_p1;
2110 /* LDV_COMMENT_BEGIN_PREP */
2111 #ifdef CONFIG_PM_SLEEP
2112 #endif
2113 /* LDV_COMMENT_END_PREP */
2114 /* content: static int mv_u3d_ep_disable(struct usb_ep *_ep)*/
2115 /* LDV_COMMENT_BEGIN_PREP */
2116 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2117 /* LDV_COMMENT_END_PREP */
2118 /* LDV_COMMENT_BEGIN_PREP */
2119 #ifdef CONFIG_PM_SLEEP
2120 #endif
2121 /* LDV_COMMENT_END_PREP */
2122 /* content: static struct usb_request * mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)*/
2123 /* LDV_COMMENT_BEGIN_PREP */
2124 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2125 /* LDV_COMMENT_END_PREP */
2126 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_alloc_request" */
2127 gfp_t var_mv_u3d_alloc_request_10_p1;
2128 /* LDV_COMMENT_BEGIN_PREP */
2129 #ifdef CONFIG_PM_SLEEP
2130 #endif
2131 /* LDV_COMMENT_END_PREP */
2132 /* content: static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)*/
2133 /* LDV_COMMENT_BEGIN_PREP */
2134 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2135 /* LDV_COMMENT_END_PREP */
2136 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_free_request" */
2137 struct usb_request * var_group2;
2138 /* LDV_COMMENT_BEGIN_PREP */
2139 #ifdef CONFIG_PM_SLEEP
2140 #endif
2141 /* LDV_COMMENT_END_PREP */
2142 /* content: static int mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)*/
2143 /* LDV_COMMENT_BEGIN_PREP */
2144 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2145 /* LDV_COMMENT_END_PREP */
2146 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_queue" */
2147 gfp_t var_mv_u3d_ep_queue_13_p2;
2148 /* LDV_COMMENT_BEGIN_PREP */
2149 #ifdef CONFIG_PM_SLEEP
2150 #endif
2151 /* LDV_COMMENT_END_PREP */
2152 /* content: static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)*/
2153 /* LDV_COMMENT_BEGIN_PREP */
2154 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2155 /* LDV_COMMENT_END_PREP */
2156 /* LDV_COMMENT_BEGIN_PREP */
2157 #ifdef CONFIG_PM_SLEEP
2158 #endif
2159 /* LDV_COMMENT_END_PREP */
2160 /* content: static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)*/
2161 /* LDV_COMMENT_BEGIN_PREP */
2162 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2163 /* LDV_COMMENT_END_PREP */
2164 /* LDV_COMMENT_BEGIN_PREP */
2165 #ifdef CONFIG_PM_SLEEP
2166 #endif
2167 /* LDV_COMMENT_END_PREP */
2168 /* content: static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)*/
2169 /* LDV_COMMENT_BEGIN_PREP */
2170 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2171 /* LDV_COMMENT_END_PREP */
2172 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_set_halt" */
2173 int var_mv_u3d_ep_set_halt_17_p1;
2174 /* LDV_COMMENT_BEGIN_PREP */
2175 #ifdef CONFIG_PM_SLEEP
2176 #endif
2177 /* LDV_COMMENT_END_PREP */
2178 /* content: static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)*/
2179 /* LDV_COMMENT_BEGIN_PREP */
2180 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2181 /* LDV_COMMENT_END_PREP */
2182 /* LDV_COMMENT_BEGIN_PREP */
2183 #ifdef CONFIG_PM_SLEEP
2184 #endif
2185 /* LDV_COMMENT_END_PREP */
2186
2187 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
2188 /* content: static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)*/
2189 /* LDV_COMMENT_BEGIN_PREP */
2190 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2191 /* LDV_COMMENT_END_PREP */
2192 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_vbus_session" */
2193 struct usb_gadget * var_group3;
2194 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_vbus_session" */
2195 int var_mv_u3d_vbus_session_24_p1;
2196 /* LDV_COMMENT_BEGIN_PREP */
2197 #ifdef CONFIG_PM_SLEEP
2198 #endif
2199 /* LDV_COMMENT_END_PREP */
2200 /* content: static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)*/
2201 /* LDV_COMMENT_BEGIN_PREP */
2202 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2203 /* LDV_COMMENT_END_PREP */
2204 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_vbus_draw" */
2205 unsigned var_mv_u3d_vbus_draw_25_p1;
2206 /* LDV_COMMENT_BEGIN_PREP */
2207 #ifdef CONFIG_PM_SLEEP
2208 #endif
2209 /* LDV_COMMENT_END_PREP */
2210 /* content: static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)*/
2211 /* LDV_COMMENT_BEGIN_PREP */
2212 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2213 /* LDV_COMMENT_END_PREP */
2214 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_pullup" */
2215 int var_mv_u3d_pullup_26_p1;
2216 /* LDV_COMMENT_BEGIN_PREP */
2217 #ifdef CONFIG_PM_SLEEP
2218 #endif
2219 /* LDV_COMMENT_END_PREP */
2220 /* content: static int mv_u3d_start(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
2221 /* LDV_COMMENT_BEGIN_PREP */
2222 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2223 /* LDV_COMMENT_END_PREP */
2224 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_start" */
2225 struct usb_gadget_driver * var_group4;
2226 /* LDV_COMMENT_BEGIN_PREP */
2227 #ifdef CONFIG_PM_SLEEP
2228 #endif
2229 /* LDV_COMMENT_END_PREP */
2230 /* content: static int mv_u3d_stop(struct usb_gadget *g)*/
2231 /* LDV_COMMENT_BEGIN_PREP */
2232 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2233 /* LDV_COMMENT_END_PREP */
2234 /* LDV_COMMENT_BEGIN_PREP */
2235 #ifdef CONFIG_PM_SLEEP
2236 #endif
2237 /* LDV_COMMENT_END_PREP */
2238
2239 /** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
2240 /* content: static int mv_u3d_probe(struct platform_device *dev)*/
2241 /* LDV_COMMENT_BEGIN_PREP */
2242 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2243 /* LDV_COMMENT_END_PREP */
2244 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_probe" */
2245 struct platform_device * var_group5;
2246 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mv_u3d_probe" */
2247 static int res_mv_u3d_probe_41;
2248 /* LDV_COMMENT_BEGIN_PREP */
2249 #ifdef CONFIG_PM_SLEEP
2250 #endif
2251 /* LDV_COMMENT_END_PREP */
2252 /* content: static int mv_u3d_remove(struct platform_device *dev)*/
2253 /* LDV_COMMENT_BEGIN_PREP */
2254 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2255 /* LDV_COMMENT_END_PREP */
2256 /* LDV_COMMENT_BEGIN_PREP */
2257 #ifdef CONFIG_PM_SLEEP
2258 #endif
2259 /* LDV_COMMENT_END_PREP */
2260 /* content: static void mv_u3d_shutdown(struct platform_device *dev)*/
2261 /* LDV_COMMENT_BEGIN_PREP */
2262 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2263 #ifdef CONFIG_PM_SLEEP
2264 #endif
2265 /* LDV_COMMENT_END_PREP */
2266
2267 /** CALLBACK SECTION request_irq **/
2268 /* content: static irqreturn_t mv_u3d_irq(int irq, void *dev)*/
2269 /* LDV_COMMENT_BEGIN_PREP */
2270 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2271 /* LDV_COMMENT_END_PREP */
2272 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_irq" */
2273 int var_mv_u3d_irq_39_p0;
2274 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_irq" */
2275 void * var_mv_u3d_irq_39_p1;
2276 /* LDV_COMMENT_BEGIN_PREP */
2277 #ifdef CONFIG_PM_SLEEP
2278 #endif
2279 /* LDV_COMMENT_END_PREP */
2280
2281
2282
2283
2284 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
2285 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
2286 /*============================= VARIABLE INITIALIZING PART =============================*/
2287 LDV_IN_INTERRUPT=1;
2288
2289
2290
2291
2292 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
2293 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
2294 /*============================= FUNCTION CALL SECTION =============================*/
2295 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
2296 ldv_initialize();
2297
2298
2299
2300
2301 int ldv_s_mv_u3d_driver_platform_driver = 0;
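/* The counter above serializes the platform_driver callbacks explored by
 * the generated environment: value 0 permits probe, 1 permits remove and
 * 2 permits shutdown, which resets it to 0 again, so the verifier only
 * follows the probe -> remove -> shutdown ordering.
 */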
2302
2303
2304
2305
2306 while( nondet_int()
2307 || !(ldv_s_mv_u3d_driver_platform_driver == 0)
2308 ) {
2309
2310 switch(nondet_int()) {
2311
2312 case 0: {
2313
2314 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2315
2316
2317 /* content: static int mv_u3d_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)*/
2318 /* LDV_COMMENT_BEGIN_PREP */
2319 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2320 /* LDV_COMMENT_END_PREP */
2321 /* LDV_COMMENT_FUNCTION_CALL Function from field "enable" from driver structure with callbacks "mv_u3d_ep_ops" */
2322 ldv_handler_precall();
2323 mv_u3d_ep_enable( var_group1, var_mv_u3d_ep_enable_8_p1);
2324 /* LDV_COMMENT_BEGIN_PREP */
2325 #ifdef CONFIG_PM_SLEEP
2326 #endif
2327 /* LDV_COMMENT_END_PREP */
2328
2329
2330
2331
2332 }
2333
2334 break;
2335 case 1: {
2336
2337 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2338
2339
2340 /* content: static int mv_u3d_ep_disable(struct usb_ep *_ep)*/
2341 /* LDV_COMMENT_BEGIN_PREP */
2342 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2343 /* LDV_COMMENT_END_PREP */
2344 /* LDV_COMMENT_FUNCTION_CALL Function from field "disable" from driver structure with callbacks "mv_u3d_ep_ops" */
2345 ldv_handler_precall();
2346 mv_u3d_ep_disable( var_group1);
2347 /* LDV_COMMENT_BEGIN_PREP */
2348 #ifdef CONFIG_PM_SLEEP
2349 #endif
2350 /* LDV_COMMENT_END_PREP */
2351
2352
2353
2354
2355 }
2356
2357 break;
2358 case 2: {
2359
2360 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2361
2362
2363 /* content: static struct usb_request * mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)*/
2364 /* LDV_COMMENT_BEGIN_PREP */
2365 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2366 /* LDV_COMMENT_END_PREP */
2367 /* LDV_COMMENT_FUNCTION_CALL Function from field "alloc_request" from driver structure with callbacks "mv_u3d_ep_ops" */
2368 ldv_handler_precall();
2369 mv_u3d_alloc_request( var_group1, var_mv_u3d_alloc_request_10_p1);
2370 /* LDV_COMMENT_BEGIN_PREP */
2371 #ifdef CONFIG_PM_SLEEP
2372 #endif
2373 /* LDV_COMMENT_END_PREP */
2374
2375
2376
2377
2378 }
2379
2380 break;
2381 case 3: {
2382
2383 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2384
2385
2386 /* content: static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)*/
2387 /* LDV_COMMENT_BEGIN_PREP */
2388 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2389 /* LDV_COMMENT_END_PREP */
2390 /* LDV_COMMENT_FUNCTION_CALL Function from field "free_request" from driver structure with callbacks "mv_u3d_ep_ops" */
2391 ldv_handler_precall();
2392 mv_u3d_free_request( var_group1, var_group2);
2393 /* LDV_COMMENT_BEGIN_PREP */
2394 #ifdef CONFIG_PM_SLEEP
2395 #endif
2396 /* LDV_COMMENT_END_PREP */
2397
2398
2399
2400
2401 }
2402
2403 break;
2404 case 4: {
2405
2406 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2407
2408
2409 /* content: static int mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)*/
2410 /* LDV_COMMENT_BEGIN_PREP */
2411 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2412 /* LDV_COMMENT_END_PREP */
2413 /* LDV_COMMENT_FUNCTION_CALL Function from field "queue" from driver structure with callbacks "mv_u3d_ep_ops" */
2414 ldv_handler_precall();
2415 mv_u3d_ep_queue( var_group1, var_group2, var_mv_u3d_ep_queue_13_p2);
2416 /* LDV_COMMENT_BEGIN_PREP */
2417 #ifdef CONFIG_PM_SLEEP
2418 #endif
2419 /* LDV_COMMENT_END_PREP */
2420
2421
2422
2423
2424 }
2425
2426 break;
2427 case 5: {
2428
2429 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2430
2431
2432 /* content: static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)*/
2433 /* LDV_COMMENT_BEGIN_PREP */
2434 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2435 /* LDV_COMMENT_END_PREP */
2436 /* LDV_COMMENT_FUNCTION_CALL Function from field "dequeue" from driver structure with callbacks "mv_u3d_ep_ops" */
2437 ldv_handler_precall();
2438 mv_u3d_ep_dequeue( var_group1, var_group2);
2439 /* LDV_COMMENT_BEGIN_PREP */
2440 #ifdef CONFIG_PM_SLEEP
2441 #endif
2442 /* LDV_COMMENT_END_PREP */
2443
2444
2445
2446
2447 }
2448
2449 break;
2450 case 6: {
2451
2452 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2453
2454
2455 /* content: static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)*/
2456 /* LDV_COMMENT_BEGIN_PREP */
2457 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2458 /* LDV_COMMENT_END_PREP */
2459 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_wedge" from driver structure with callbacks "mv_u3d_ep_ops" */
2460 ldv_handler_precall();
2461 mv_u3d_ep_set_wedge( var_group1);
2462 /* LDV_COMMENT_BEGIN_PREP */
2463 #ifdef CONFIG_PM_SLEEP
2464 #endif
2465 /* LDV_COMMENT_END_PREP */
2466
2467
2468
2469
2470 }
2471
2472 break;
2473 case 7: {
2474
2475 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2476
2477
2478 /* content: static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)*/
2479 /* LDV_COMMENT_BEGIN_PREP */
2480 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2481 /* LDV_COMMENT_END_PREP */
2482 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_halt" from driver structure with callbacks "mv_u3d_ep_ops" */
2483 ldv_handler_precall();
2484 mv_u3d_ep_set_halt( var_group1, var_mv_u3d_ep_set_halt_17_p1);
2485 /* LDV_COMMENT_BEGIN_PREP */
2486 #ifdef CONFIG_PM_SLEEP
2487 #endif
2488 /* LDV_COMMENT_END_PREP */
2489
2490
2491
2492
2493 }
2494
2495 break;
2496 case 8: {
2497
2498 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2499
2500
2501 /* content: static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)*/
2502 /* LDV_COMMENT_BEGIN_PREP */
2503 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2504 /* LDV_COMMENT_END_PREP */
2505 /* LDV_COMMENT_FUNCTION_CALL Function from field "fifo_flush" from driver structure with callbacks "mv_u3d_ep_ops" */
2506 ldv_handler_precall();
2507 mv_u3d_ep_fifo_flush( var_group1);
2508 /* LDV_COMMENT_BEGIN_PREP */
2509 #ifdef CONFIG_PM_SLEEP
2510 #endif
2511 /* LDV_COMMENT_END_PREP */
2512
2513
2514
2515
2516 }
2517
2518 break;
2519 case 9: {
2520
2521 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
2522
2523
2524 /* content: static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)*/
2525 /* LDV_COMMENT_BEGIN_PREP */
2526 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2527 /* LDV_COMMENT_END_PREP */
2528 /* LDV_COMMENT_FUNCTION_CALL Function from field "vbus_session" from driver structure with callbacks "mv_u3d_ops" */
2529 ldv_handler_precall();
2530 mv_u3d_vbus_session( var_group3, var_mv_u3d_vbus_session_24_p1);
2531 /* LDV_COMMENT_BEGIN_PREP */
2532 #ifdef CONFIG_PM_SLEEP
2533 #endif
2534 /* LDV_COMMENT_END_PREP */
2535
2536
2537
2538
2539 }
2540
2541 break;
2542 case 10: {
2543
2544 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
2545
2546
2547 /* content: static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)*/
2548 /* LDV_COMMENT_BEGIN_PREP */
2549 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2550 /* LDV_COMMENT_END_PREP */
2551 /* LDV_COMMENT_FUNCTION_CALL Function from field "vbus_draw" from driver structure with callbacks "mv_u3d_ops" */
2552 ldv_handler_precall();
2553 mv_u3d_vbus_draw( var_group3, var_mv_u3d_vbus_draw_25_p1);
2554 /* LDV_COMMENT_BEGIN_PREP */
2555 #ifdef CONFIG_PM_SLEEP
2556 #endif
2557 /* LDV_COMMENT_END_PREP */
2558
2559
2560
2561
2562 }
2563
2564 break;
2565 case 11: {
2566
2567 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
2568
2569
2570 /* content: static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)*/
2571 /* LDV_COMMENT_BEGIN_PREP */
2572 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2573 /* LDV_COMMENT_END_PREP */
2574 /* LDV_COMMENT_FUNCTION_CALL Function from field "pullup" from driver structure with callbacks "mv_u3d_ops" */
2575 ldv_handler_precall();
2576 mv_u3d_pullup( var_group3, var_mv_u3d_pullup_26_p1);
2577 /* LDV_COMMENT_BEGIN_PREP */
2578 #ifdef CONFIG_PM_SLEEP
2579 #endif
2580 /* LDV_COMMENT_END_PREP */
2581
2582
2583
2584
2585 }
2586
2587 break;
2588 case 12: {
2589
2590 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
2591
2592
2593 /* content: static int mv_u3d_start(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
2594 /* LDV_COMMENT_BEGIN_PREP */
2595 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2596 /* LDV_COMMENT_END_PREP */
2597 /* LDV_COMMENT_FUNCTION_CALL Function from field "udc_start" from driver structure with callbacks "mv_u3d_ops" */
2598 ldv_handler_precall();
2599 mv_u3d_start( var_group3, var_group4);
2600 /* LDV_COMMENT_BEGIN_PREP */
2601 #ifdef CONFIG_PM_SLEEP
2602 #endif
2603 /* LDV_COMMENT_END_PREP */
2604
2605
2606
2607
2608 }
2609
2610 break;
2611 case 13: {
2612
2613 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
2614
2615
2616 /* content: static int mv_u3d_stop(struct usb_gadget *g)*/
2617 /* LDV_COMMENT_BEGIN_PREP */
2618 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2619 /* LDV_COMMENT_END_PREP */
2620 /* LDV_COMMENT_FUNCTION_CALL Function from field "udc_stop" from driver structure with callbacks "mv_u3d_ops" */
2621 ldv_handler_precall();
2622 mv_u3d_stop( var_group3);
2623 /* LDV_COMMENT_BEGIN_PREP */
2624 #ifdef CONFIG_PM_SLEEP
2625 #endif
2626 /* LDV_COMMENT_END_PREP */
2627
2628
2629
2630
2631 }
2632
2633 break;
2634 case 14: {
2635
2636 /** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
2637 if(ldv_s_mv_u3d_driver_platform_driver==0) {
2638
2639 /* content: static int mv_u3d_probe(struct platform_device *dev)*/
2640 /* LDV_COMMENT_BEGIN_PREP */
2641 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2642 /* LDV_COMMENT_END_PREP */
2643 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "mv_u3d_driver". Standard function test for correct return result. */
2644 res_mv_u3d_probe_41 = mv_u3d_probe( var_group5);
2645 ldv_check_return_value(res_mv_u3d_probe_41);
2646 ldv_check_return_value_probe(res_mv_u3d_probe_41);
2647 if(res_mv_u3d_probe_41)
2648 goto ldv_module_exit;
2649 /* LDV_COMMENT_BEGIN_PREP */
2650 #ifdef CONFIG_PM_SLEEP
2651 #endif
2652 /* LDV_COMMENT_END_PREP */
2653 ldv_s_mv_u3d_driver_platform_driver++;
2654
2655 }
2656
2657 }
2658
2659 break;
2660 case 15: {
2661
2662 /** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
2663 if(ldv_s_mv_u3d_driver_platform_driver==1) {
2664
2665 /* content: static int mv_u3d_remove(struct platform_device *dev)*/
2666 /* LDV_COMMENT_BEGIN_PREP */
2667 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2668 /* LDV_COMMENT_END_PREP */
2669 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "mv_u3d_driver" */
2670 ldv_handler_precall();
2671 mv_u3d_remove( var_group5);
2672 /* LDV_COMMENT_BEGIN_PREP */
2673 #ifdef CONFIG_PM_SLEEP
2674 #endif
2675 /* LDV_COMMENT_END_PREP */
2676 ldv_s_mv_u3d_driver_platform_driver++;
2677
2678 }
2679
2680 }
2681
2682 break;
2683 case 16: {
2684
2685 /** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
2686 if(ldv_s_mv_u3d_driver_platform_driver==2) {
2687
2688 /* content: static void mv_u3d_shutdown(struct platform_device *dev)*/
2689 /* LDV_COMMENT_BEGIN_PREP */
2690 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2691 #ifdef CONFIG_PM_SLEEP
2692 #endif
2693 /* LDV_COMMENT_END_PREP */
2694 /* LDV_COMMENT_FUNCTION_CALL Function from field "shutdown" from driver structure with callbacks "mv_u3d_driver" */
2695 ldv_handler_precall();
2696 mv_u3d_shutdown( var_group5);
2697 ldv_s_mv_u3d_driver_platform_driver=0;
2698
2699 }
2700
2701 }
2702
2703 break;
2704 case 17: {
2705
2706 /** CALLBACK SECTION request_irq **/
2707 LDV_IN_INTERRUPT=2;
2708
2709 /* content: static irqreturn_t mv_u3d_irq(int irq, void *dev)*/
2710 /* LDV_COMMENT_BEGIN_PREP */
2711 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2712 /* LDV_COMMENT_END_PREP */
2713 /* LDV_COMMENT_FUNCTION_CALL */
2714 ldv_handler_precall();
2715 mv_u3d_irq( var_mv_u3d_irq_39_p0, var_mv_u3d_irq_39_p1);
2716 /* LDV_COMMENT_BEGIN_PREP */
2717 #ifdef CONFIG_PM_SLEEP
2718 #endif
2719 /* LDV_COMMENT_END_PREP */
2720 LDV_IN_INTERRUPT=1;
2721
2722
2723
2724 }
2725
2726 break;
2727 default: break;
2728
2729 }
2730
2731 }
2732
2733 ldv_module_exit:
2734
2735 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
2736 ldv_final: ldv_check_final_state();
2737
2738 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
2739 return;
2740
2741 }
2742 #endif
2743
2744 /* LDV_COMMENT_END_MAIN */ 1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8 extern void ldv_dma_map_page(void);
9 extern void ldv_dma_mapping_error(void);
10 #line 1 "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/10667/dscv_tempdir/dscv/ri/331_1a/drivers/usb/gadget/udc/mv_u3d_core.c"
11
12 /*
13 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms and conditions of the GNU General Public License,
17 * version 2, as published by the Free Software Foundation.
18 */
19
20 #include <linux/module.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/dmapool.h>
23 #include <linux/kernel.h>
24 #include <linux/delay.h>
25 #include <linux/ioport.h>
26 #include <linux/sched.h>
27 #include <linux/slab.h>
28 #include <linux/errno.h>
29 #include <linux/timer.h>
30 #include <linux/list.h>
31 #include <linux/notifier.h>
32 #include <linux/interrupt.h>
33 #include <linux/moduleparam.h>
34 #include <linux/device.h>
35 #include <linux/usb/ch9.h>
36 #include <linux/usb/gadget.h>
37 #include <linux/pm.h>
38 #include <linux/io.h>
39 #include <linux/irq.h>
40 #include <linux/platform_device.h>
41 #include <linux/platform_data/mv_usb.h>
42 #include <linux/clk.h>
43
44 #include "mv_u3d.h"
45
46 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
47
48 static const char driver_name[] = "mv_u3d";
49 static const char driver_desc[] = DRIVER_DESC;
50
51 static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status);
52 static void mv_u3d_stop_activity(struct mv_u3d *u3d,
53 struct usb_gadget_driver *driver);
54
55 /* for endpoint 0 operations */
56 static const struct usb_endpoint_descriptor mv_u3d_ep0_desc = {
57 .bLength = USB_DT_ENDPOINT_SIZE,
58 .bDescriptorType = USB_DT_ENDPOINT,
59 .bEndpointAddress = 0,
60 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
61 .wMaxPacketSize = MV_U3D_EP0_MAX_PKT_SIZE,
62 };
63
64 static void mv_u3d_ep0_reset(struct mv_u3d *u3d)
65 {
66 struct mv_u3d_ep *ep;
67 u32 epxcr;
68 int i;
69
70 for (i = 0; i < 2; i++) {
71 ep = &u3d->eps[i];
72 ep->u3d = u3d;
73
74 /* ep0 ep context, ep0 in and out share the same ep context */
75 ep->ep_context = &u3d->ep_context[1];
76 }
77
78 /* reset ep state machine */
79 /* reset ep0 out */
80 epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
81 epxcr |= MV_U3D_EPXCR_EP_INIT;
82 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
83 udelay(5);
84 epxcr &= ~MV_U3D_EPXCR_EP_INIT;
85 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
86
87 epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
88 << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
89 | (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
90 | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
91 | MV_U3D_EPXCR_EP_TYPE_CONTROL);
92 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr1);
93
94 /* reset ep0 in */
95 epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
96 epxcr |= MV_U3D_EPXCR_EP_INIT;
97 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
98 udelay(5);
99 epxcr &= ~MV_U3D_EPXCR_EP_INIT;
100 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
101
102 epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
103 << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
104 | (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
105 | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
106 | MV_U3D_EPXCR_EP_TYPE_CONTROL);
107 iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr1);
108 }
109
110 static void mv_u3d_ep0_stall(struct mv_u3d *u3d)
111 {
112 u32 tmp;
113 dev_dbg(u3d->dev, "%s\n", __func__);
114
115 /* set TX and RX to stall */
116 tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
117 tmp |= MV_U3D_EPXCR_EP_HALT;
118 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
119
120 tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
121 tmp |= MV_U3D_EPXCR_EP_HALT;
122 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
123
124 /* update ep0 state */
125 u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
126 u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
127 }
128
129 static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index,
130 struct mv_u3d_req *curr_req)
131 {
132 struct mv_u3d_trb *curr_trb;
133 int actual, remaining_length = 0;
134 int direction, ep_num;
135 int retval = 0;
136 u32 tmp, status, length;
137
138 direction = index % 2;
139 ep_num = index / 2;
140
141 actual = curr_req->req.length;
142
143 while (!list_empty(&curr_req->trb_list)) {
144 curr_trb = list_entry(curr_req->trb_list.next,
145 struct mv_u3d_trb, trb_list);
146 if (!curr_trb->trb_hw->ctrl.own) {
147 dev_err(u3d->dev, "%s, TRB own error!\n",
148 u3d->eps[index].name);
149 return 1;
150 }
151
152 curr_trb->trb_hw->ctrl.own = 0;
153 if (direction == MV_U3D_EP_DIR_OUT)
154 tmp = ioread32(&u3d->vuc_regs->rxst[ep_num].statuslo);
155 else
156 tmp = ioread32(&u3d->vuc_regs->txst[ep_num].statuslo);
157
158 status = tmp >> MV_U3D_XFERSTATUS_COMPLETE_SHIFT;
159 length = tmp & MV_U3D_XFERSTATUS_TRB_LENGTH_MASK;
160
161 if (status == MV_U3D_COMPLETE_SUCCESS ||
162 (status == MV_U3D_COMPLETE_SHORT_PACKET &&
163 direction == MV_U3D_EP_DIR_OUT)) {
164 remaining_length += length;
165 actual -= remaining_length;
166 } else {
167 dev_err(u3d->dev,
168 "complete_tr error: ep=%d %s: error = 0x%x\n",
169 index >> 1, direction ? "SEND" : "RECV",
170 status);
171 retval = -EPROTO;
172 }
173
174 list_del_init(&curr_trb->trb_list);
175 }
176 if (retval)
177 return retval;
178
179 curr_req->req.actual = actual;
180 return 0;
181 }
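/*
 * For the common single-TRB case the arithmetic above reduces to
 * req.actual = req.length - <bytes left over in the TRB>.  A purely
 * illustrative example: a 1024-byte OUT request completed as a short
 * packet with a reported leftover length of 200 ends up with
 * curr_req->req.actual = 1024 - 200 = 824.
 */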
182
183 /*
184 * mv_u3d_done() - retire a request; caller blocked irqs
185 * @status : request status to be set, only works when
186 * request is still in progress.
187 */
188 static
189 void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status)
190 __releases(&ep->udc->lock)
191 __acquires(&ep->udc->lock)
192 {
193 struct mv_u3d *u3d = (struct mv_u3d *)ep->u3d;
194
195 dev_dbg(u3d->dev, "mv_u3d_done: remove req->queue\n");
196 /* Remove the req from the ep queue */
197 list_del_init(&req->queue);
198
199 /* req.status should be set as -EINPROGRESS in ep_queue() */
200 if (req->req.status == -EINPROGRESS)
201 req->req.status = status;
202 else
203 status = req->req.status;
204
205 /* Free trb for the request */
206 if (!req->chain)
207 dma_pool_free(u3d->trb_pool,
208 req->trb_head->trb_hw, req->trb_head->trb_dma);
209 else {
210 dma_unmap_single(ep->u3d->gadget.dev.parent,
211 (dma_addr_t)req->trb_head->trb_dma,
212 req->trb_count * sizeof(struct mv_u3d_trb_hw),
213 DMA_BIDIRECTIONAL);
214 kfree(req->trb_head->trb_hw);
215 }
216 kfree(req->trb_head);
217
218 usb_gadget_unmap_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep));
219
220 if (status && (status != -ESHUTDOWN)) {
221 dev_dbg(u3d->dev, "complete %s req %p stat %d len %u/%u",
222 ep->ep.name, &req->req, status,
223 req->req.actual, req->req.length);
224 }
225
226 spin_unlock(&ep->u3d->lock);
227
228 usb_gadget_giveback_request(&ep->ep, &req->req);
229
230 spin_lock(&ep->u3d->lock);
231 }
232
233 static int mv_u3d_queue_trb(struct mv_u3d_ep *ep, struct mv_u3d_req *req)
234 {
235 u32 tmp, direction;
236 struct mv_u3d *u3d;
237 struct mv_u3d_ep_context *ep_context;
238 int retval = 0;
239
240 u3d = ep->u3d;
241 direction = mv_u3d_ep_dir(ep);
242
243 /* ep0 in and out share the same ep context slot 1 */
244 if (ep->ep_num == 0)
245 ep_context = &(u3d->ep_context[1]);
246 else
247 ep_context = &(u3d->ep_context[ep->ep_num * 2 + direction]);
248
249 /* check if the pipe is empty or not */
250 if (!list_empty(&ep->queue)) {
251 dev_err(u3d->dev, "add trb to non-empty queue!\n");
252 retval = -ENOMEM;
253 WARN_ON(1);
254 } else {
255 ep_context->rsvd0 = cpu_to_le32(1);
256 ep_context->rsvd1 = 0;
257
258 /* Configure the trb address and set the DCS bit.
259 * Both DCS bit and own bit in trb should be set.
260 */
261 ep_context->trb_addr_lo =
262 cpu_to_le32(req->trb_head->trb_dma | DCS_ENABLE);
263 ep_context->trb_addr_hi = 0;
264
265 /* Ensure that updates to the EP Context will
266 * occur before ringing the doorbell.
267 */
268 wmb();
269
270 /* ring bell the ep */
271 if (ep->ep_num == 0)
272 tmp = 0x1;
273 else
274 tmp = ep->ep_num * 2
275 + ((direction == MV_U3D_EP_DIR_OUT) ? 0 : 1);
276
277 iowrite32(tmp, &u3d->op_regs->doorbell);
278 }
279 return retval;
280 }
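/*
 * The doorbell value written above selects the hardware endpoint slot:
 * 0x1 for ep0, otherwise ep_num * 2 plus 1 for the IN direction.  For
 * example (endpoint numbers chosen for illustration only), ringing ep3 IN
 * writes 3 * 2 + 1 = 7, while ep3 OUT writes 6.
 */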
281
282 static struct mv_u3d_trb *mv_u3d_build_trb_one(struct mv_u3d_req *req,
283 unsigned *length, dma_addr_t *dma)
284 {
285 u32 temp;
286 unsigned int direction;
287 struct mv_u3d_trb *trb;
288 struct mv_u3d_trb_hw *trb_hw;
289 struct mv_u3d *u3d;
290
291 /* how big will this transfer be? */
292 *length = req->req.length - req->req.actual;
293 BUG_ON(*length > (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
294
295 u3d = req->ep->u3d;
296
297 trb = kzalloc(sizeof(*trb), GFP_ATOMIC);
298 if (!trb)
299 return NULL;
300
301 /*
302 * Make sure __GFP_HIGHMEM is not set, or dma_to_virt cannot be used.
303 * GFP_KERNEL cannot be used here either, since we may be called
304 * while holding a spinlock, so allocate with GFP_ATOMIC.
305 */
306 trb_hw = dma_pool_alloc(u3d->trb_pool, GFP_ATOMIC, dma);
307 if (!trb_hw) {
308 kfree(trb);
309 dev_err(u3d->dev,
310 "%s, dma_pool_alloc fail\n", __func__);
311 return NULL;
312 }
313 trb->trb_dma = *dma;
314 trb->trb_hw = trb_hw;
315
316 /* initialize buffer page pointers */
317 temp = (u32)(req->req.dma + req->req.actual);
318
319 trb_hw->buf_addr_lo = cpu_to_le32(temp);
320 trb_hw->buf_addr_hi = 0;
321 trb_hw->trb_len = cpu_to_le32(*length);
322 trb_hw->ctrl.own = 1;
323
324 if (req->ep->ep_num == 0)
325 trb_hw->ctrl.type = TYPE_DATA;
326 else
327 trb_hw->ctrl.type = TYPE_NORMAL;
328
329 req->req.actual += *length;
330
331 direction = mv_u3d_ep_dir(req->ep);
332 if (direction == MV_U3D_EP_DIR_IN)
333 trb_hw->ctrl.dir = 1;
334 else
335 trb_hw->ctrl.dir = 0;
336
337 /* Enable interrupt for the last trb of a request */
338 if (!req->req.no_interrupt)
339 trb_hw->ctrl.ioc = 1;
340
341 trb_hw->ctrl.chain = 0;
342
343 wmb();
344 return trb;
345 }
346
347 static int mv_u3d_build_trb_chain(struct mv_u3d_req *req, unsigned *length,
348 struct mv_u3d_trb *trb, int *is_last)
349 {
350 u32 temp;
351 unsigned int direction;
352 struct mv_u3d *u3d;
353
354 /* how big will this transfer be? */
355 *length = min(req->req.length - req->req.actual,
356 (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
357
358 u3d = req->ep->u3d;
359
360 trb->trb_dma = 0;
361
362 /* initialize buffer page pointers */
363 temp = (u32)(req->req.dma + req->req.actual);
364
365 trb->trb_hw->buf_addr_lo = cpu_to_le32(temp);
366 trb->trb_hw->buf_addr_hi = 0;
367 trb->trb_hw->trb_len = cpu_to_le32(*length);
368 trb->trb_hw->ctrl.own = 1;
369
370 if (req->ep->ep_num == 0)
371 trb->trb_hw->ctrl.type = TYPE_DATA;
372 else
373 trb->trb_hw->ctrl.type = TYPE_NORMAL;
374
375 req->req.actual += *length;
376
377 direction = mv_u3d_ep_dir(req->ep);
378 if (direction == MV_U3D_EP_DIR_IN)
379 trb->trb_hw->ctrl.dir = 1;
380 else
381 trb->trb_hw->ctrl.dir = 0;
382
383 /* zlp is needed if req->req.zero is set */
384 if (req->req.zero) {
385 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
386 *is_last = 1;
387 else
388 *is_last = 0;
389 } else if (req->req.length == req->req.actual)
390 *is_last = 1;
391 else
392 *is_last = 0;
393
394 /* Enable interrupt for the last trb of a request */
395 if (*is_last && !req->req.no_interrupt)
396 trb->trb_hw->ctrl.ioc = 1;
397
398 if (*is_last)
399 trb->trb_hw->ctrl.chain = 0;
400 else {
401 trb->trb_hw->ctrl.chain = 1;
402 dev_dbg(u3d->dev, "chain trb\n");
403 }
404
405 wmb();
406
407 return 0;
408 }
409
410 /* generate TRB linked list for a request
411 * the usb controller only supports a contiguous trb chain,
412 * i.e. the trb structures must be physically contiguous.
413 */
414 static int mv_u3d_req_to_trb(struct mv_u3d_req *req)
415 {
416 unsigned count;
417 int is_last;
418 struct mv_u3d_trb *trb;
419 struct mv_u3d_trb_hw *trb_hw;
420 struct mv_u3d *u3d;
421 dma_addr_t dma;
422 unsigned length;
423 unsigned trb_num;
424
425 u3d = req->ep->u3d;
426
427 INIT_LIST_HEAD(&req->trb_list);
428
429 length = req->req.length - req->req.actual;
430 /* normally the request transfer length is less than 16KB.
431 * we use mv_u3d_build_trb_one() to optimize it.
432 */
433 if (length <= (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER) {
434 trb = mv_u3d_build_trb_one(req, &count, &dma);
435 list_add_tail(&trb->trb_list, &req->trb_list);
436 req->trb_head = trb;
437 req->trb_count = 1;
438 req->chain = 0;
439 } else {
440 trb_num = length / MV_U3D_EP_MAX_LENGTH_TRANSFER;
441 if (length % MV_U3D_EP_MAX_LENGTH_TRANSFER)
442 trb_num++;
443
444 trb = kcalloc(trb_num, sizeof(*trb), GFP_ATOMIC);
445 if (!trb)
446 return -ENOMEM;
447
448 trb_hw = kcalloc(trb_num, sizeof(*trb_hw), GFP_ATOMIC);
449 if (!trb_hw) {
450 kfree(trb);
451 return -ENOMEM;
452 }
453
454 do {
455 trb->trb_hw = trb_hw;
456 if (mv_u3d_build_trb_chain(req, &count,
457 trb, &is_last)) {
458 dev_err(u3d->dev,
459 "%s, mv_u3d_build_trb_chain fail\n",
460 __func__);
461 return -EIO;
462 }
463
464 list_add_tail(&trb->trb_list, &req->trb_list);
465 req->trb_count++;
466 trb++;
467 trb_hw++;
468 } while (!is_last);
469
470 req->trb_head = list_entry(req->trb_list.next,
471 struct mv_u3d_trb, trb_list);
472 req->trb_head->trb_dma = dma_map_single(u3d->gadget.dev.parent,
473 req->trb_head->trb_hw,
474 trb_num * sizeof(*trb_hw),
475 DMA_BIDIRECTIONAL);
476
477 req->chain = 1;
478 }
479
480 return 0;
481 }
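/*
 * For a chained request the TRB count above is just a round-up division
 * of the remaining length by MV_U3D_EP_MAX_LENGTH_TRANSFER.  Assuming,
 * purely for illustration, a 16 KB per-TRB limit and a 40 KB request:
 *
 *     trb_num = 40960 / 16384 = 2, remainder 8192 != 0  ->  trb_num = 3
 *
 * so three TRBs are allocated in one physically contiguous kcalloc()
 * block and mapped with a single dma_map_single() call.
 */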
482
483 static int
484 mv_u3d_start_queue(struct mv_u3d_ep *ep)
485 {
486 struct mv_u3d *u3d = ep->u3d;
487 struct mv_u3d_req *req;
488 int ret;
489
490 if (!list_empty(&ep->req_list) && !ep->processing)
491 req = list_entry(ep->req_list.next, struct mv_u3d_req, list);
492 else
493 return 0;
494
495 ep->processing = 1;
496
497 /* set up dma mapping */
498 ret = usb_gadget_map_request(&u3d->gadget, &req->req,
499 mv_u3d_ep_dir(ep));
500 if (ret)
501 return ret;
502
503 req->req.status = -EINPROGRESS;
504 req->req.actual = 0;
505 req->trb_count = 0;
506
507 /* build trbs and push them to device queue */
508 if (!mv_u3d_req_to_trb(req)) {
509 ret = mv_u3d_queue_trb(ep, req);
510 if (ret) {
511 ep->processing = 0;
512 return ret;
513 }
514 } else {
515 ep->processing = 0;
516 dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__);
517 return -ENOMEM;
518 }
519
520 /* irq handler advances the queue */
521 if (req)
522 list_add_tail(&req->queue, &ep->queue);
523
524 return 0;
525 }
526
527 static int mv_u3d_ep_enable(struct usb_ep *_ep,
528 const struct usb_endpoint_descriptor *desc)
529 {
530 struct mv_u3d *u3d;
531 struct mv_u3d_ep *ep;
532 u16 max = 0;
533 unsigned maxburst = 0;
534 u32 epxcr, direction;
535
536 if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
537 return -EINVAL;
538
539 ep = container_of(_ep, struct mv_u3d_ep, ep);
540 u3d = ep->u3d;
541
542 if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN)
543 return -ESHUTDOWN;
544
545 direction = mv_u3d_ep_dir(ep);
546 max = le16_to_cpu(desc->wMaxPacketSize);
547
548 if (!_ep->maxburst)
549 _ep->maxburst = 1;
550 maxburst = _ep->maxburst;
551
552 /* Set the max burst size */
553 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
554 case USB_ENDPOINT_XFER_BULK:
555 if (maxburst > 16) {
556 dev_dbg(u3d->dev,
557 "max burst should not be greater "
558 "than 16 on bulk ep\n");
559 maxburst = 1;
560 _ep->maxburst = maxburst;
561 }
562 dev_dbg(u3d->dev,
563 "maxburst: %d on bulk %s\n", maxburst, ep->name);
564 break;
565 case USB_ENDPOINT_XFER_CONTROL:
566 /* control transfers only support a maxburst of one */
567 maxburst = 1;
568 _ep->maxburst = maxburst;
569 break;
570 case USB_ENDPOINT_XFER_INT:
571 if (maxburst != 1) {
572 dev_dbg(u3d->dev,
573 "max burst should be 1 on int ep "
574 "if transfer size is not 1024\n");
575 maxburst = 1;
576 _ep->maxburst = maxburst;
577 }
578 break;
579 case USB_ENDPOINT_XFER_ISOC:
580 if (maxburst != 1) {
581 dev_dbg(u3d->dev,
582 "max burst should be 1 on isoc ep "
583 "if transfer size is not 1024\n");
584 maxburst = 1;
585 _ep->maxburst = maxburst;
586 }
587 break;
588 default:
589 goto en_done;
590 }
591
592 ep->ep.maxpacket = max;
593 ep->ep.desc = desc;
594 ep->enabled = 1;
595
596 /* Enable the endpoint for Rx or Tx and set the endpoint type */
597 if (direction == MV_U3D_EP_DIR_OUT) {
598 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
599 epxcr |= MV_U3D_EPXCR_EP_INIT;
600 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
601 udelay(5);
602 epxcr &= ~MV_U3D_EPXCR_EP_INIT;
603 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
604
605 epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
606 | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
607 | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
608 | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
609 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
610 } else {
611 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
612 epxcr |= MV_U3D_EPXCR_EP_INIT;
613 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
614 udelay(5);
615 epxcr &= ~MV_U3D_EPXCR_EP_INIT;
616 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
617
618 epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
619 | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
620 | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
621 | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
622 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
623 }
624
625 return 0;
626 en_done:
627 return -EINVAL;
628 }
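/*
 * The epxcr1 value programmed in mv_u3d_ep_enable() packs the endpoint
 * parameters into one register: max packet size, (maxburst - 1), an
 * enable bit and the transfer type.  For a hypothetical bulk endpoint
 * with max = 1024 and maxburst = 4 this would amount to
 *
 *     epxcr = (1024 << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
 *           | (3 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
 *           | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
 *           | USB_ENDPOINT_XFER_BULK;
 *
 * (the shift constants come from mv_u3d.h; the numeric values here are
 * examples only).
 */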
629
630 static int mv_u3d_ep_disable(struct usb_ep *_ep)
631 {
632 struct mv_u3d *u3d;
633 struct mv_u3d_ep *ep;
634 u32 epxcr, direction;
635 unsigned long flags;
636
637 if (!_ep)
638 return -EINVAL;
639
640 ep = container_of(_ep, struct mv_u3d_ep, ep);
641 if (!ep->ep.desc)
642 return -EINVAL;
643
644 u3d = ep->u3d;
645
646 direction = mv_u3d_ep_dir(ep);
647
648 /* nuke all pending requests (does flush) */
649 spin_lock_irqsave(&u3d->lock, flags);
650 mv_u3d_nuke(ep, -ESHUTDOWN);
651 spin_unlock_irqrestore(&u3d->lock, flags);
652
653 /* Disable the endpoint for Rx or Tx and reset the endpoint type */
654 if (direction == MV_U3D_EP_DIR_OUT) {
655 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
656 epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
657 | USB_ENDPOINT_XFERTYPE_MASK);
658 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
659 } else {
660 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
661 epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
662 | USB_ENDPOINT_XFERTYPE_MASK);
663 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
664 }
665
666 ep->enabled = 0;
667
668 ep->ep.desc = NULL;
669 return 0;
670 }
671
672 static struct usb_request *
673 mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
674 {
675 struct mv_u3d_req *req = NULL;
676
677 req = kzalloc(sizeof *req, gfp_flags);
678 if (!req)
679 return NULL;
680
681 INIT_LIST_HEAD(&req->queue);
682
683 return &req->req;
684 }
685
686 static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)
687 {
688 struct mv_u3d_req *req = container_of(_req, struct mv_u3d_req, req);
689
690 kfree(req);
691 }
692
693 static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)
694 {
695 struct mv_u3d *u3d;
696 u32 direction;
697 struct mv_u3d_ep *ep = container_of(_ep, struct mv_u3d_ep, ep);
698 unsigned int loops;
699 u32 tmp;
700
701 /* if endpoint is not enabled, cannot flush endpoint */
702 if (!ep->enabled)
703 return;
704
705 u3d = ep->u3d;
706 direction = mv_u3d_ep_dir(ep);
707
708 /* ep0 needs the flush bit cleared after flushing the fifo. */
709 if (!ep->ep_num) {
710 if (direction == MV_U3D_EP_DIR_OUT) {
711 tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
712 tmp |= MV_U3D_EPXCR_EP_FLUSH;
713 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
714 udelay(10);
715 tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
716 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
717 } else {
718 tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
719 tmp |= MV_U3D_EPXCR_EP_FLUSH;
720 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
721 udelay(10);
722 tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
723 iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
724 }
725 return;
726 }
727
728 if (direction == MV_U3D_EP_DIR_OUT) {
729 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
730 tmp |= MV_U3D_EPXCR_EP_FLUSH;
731 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
732
733 /* Wait until flushing completed */
734 loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
735 while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0) &
736 MV_U3D_EPXCR_EP_FLUSH) {
737 /*
738 * EP_FLUSH bit should be cleared to indicate this
739 * operation is complete
740 */
741 if (loops == 0) {
742 dev_dbg(u3d->dev,
743 "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
744 direction ? "in" : "out");
745 return;
746 }
747 loops--;
748 udelay(LOOPS_USEC);
749 }
750 } else { /* EP_DIR_IN */
751 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
752 tmp |= MV_U3D_EPXCR_EP_FLUSH;
753 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
754
755 /* Wait until flushing completed */
756 loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
757 while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0) &
758 MV_U3D_EPXCR_EP_FLUSH) {
759 /*
760 * EP_FLUSH bit should be cleared to indicate this
761 * operation is complete
762 */
763 if (loops == 0) {
764 dev_dbg(u3d->dev,
765 "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
766 direction ? "in" : "out");
767 return;
768 }
769 loops--;
770 udelay(LOOPS_USEC);
771 }
772 }
773 }
774
775 /* queues (submits) an I/O request to an endpoint */
776 static int
777 mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
778 {
779 struct mv_u3d_ep *ep;
780 struct mv_u3d_req *req;
781 struct mv_u3d *u3d;
782 unsigned long flags;
783 int is_first_req = 0;
784
785 if (unlikely(!_ep || !_req))
786 return -EINVAL;
787
788 ep = container_of(_ep, struct mv_u3d_ep, ep);
789 u3d = ep->u3d;
790
791 req = container_of(_req, struct mv_u3d_req, req);
792
793 if (!ep->ep_num
794 && u3d->ep0_state == MV_U3D_STATUS_STAGE
795 && !_req->length) {
796 dev_dbg(u3d->dev, "ep0 status stage\n");
797 u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
798 return 0;
799 }
800
801 dev_dbg(u3d->dev, "%s: %s, req: 0x%p\n",
802 __func__, _ep->name, req);
803
804 /* catch various bogus parameters */
805 if (!req->req.complete || !req->req.buf
806 || !list_empty(&req->queue)) {
807 dev_err(u3d->dev,
808 "%s, bad params, _req: 0x%p,"
809 "req->req.complete: 0x%p, req->req.buf: 0x%p,"
810 "list_empty: 0x%x\n",
811 __func__, _req,
812 req->req.complete, req->req.buf,
813 list_empty(&req->queue));
814 return -EINVAL;
815 }
816 if (unlikely(!ep->ep.desc)) {
817 dev_err(u3d->dev, "%s, bad ep\n", __func__);
818 return -EINVAL;
819 }
820 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
821 if (req->req.length > ep->ep.maxpacket)
822 return -EMSGSIZE;
823 }
824
825 if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN) {
826 dev_err(u3d->dev,
827 "bad params of driver/speed\n");
828 return -ESHUTDOWN;
829 }
830
831 req->ep = ep;
832
833 /* Software list handles usb request. */
834 spin_lock_irqsave(&ep->req_lock, flags);
835 is_first_req = list_empty(&ep->req_list);
836 list_add_tail(&req->list, &ep->req_list);
837 spin_unlock_irqrestore(&ep->req_lock, flags);
838 if (!is_first_req) {
839 dev_dbg(u3d->dev, "list is not empty\n");
840 return 0;
841 }
842
843 dev_dbg(u3d->dev, "call mv_u3d_start_queue from usb_ep_queue\n");
844 spin_lock_irqsave(&u3d->lock, flags);
845 mv_u3d_start_queue(ep);
846 spin_unlock_irqrestore(&u3d->lock, flags);
847 return 0;
848 }
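For reference, the caller's side of this entry point is the standard gadget API: a function driver allocates a request, fills in buf/length/complete (the fields validated above), and submits it with usb_ep_queue(). A hedged sketch, with my_ep, my_buf and my_complete as hypothetical names:

/* Sketch only: how a gadget function driver would reach mv_u3d_ep_queue(). */
static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* invoked by the UDC once the transfer finishes or is cancelled */
}

static int submit_one(struct usb_ep *my_ep, void *my_buf, unsigned int len)
{
	struct usb_request *req = usb_ep_alloc_request(my_ep, GFP_ATOMIC);

	if (!req)
		return -ENOMEM;

	req->buf = my_buf;
	req->length = len;
	req->complete = my_complete;	/* required, as checked above */

	return usb_ep_queue(my_ep, req, GFP_ATOMIC);
}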
849
850 /* dequeues (cancels, unlinks) an I/O request from an endpoint */
851 static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
852 {
853 struct mv_u3d_ep *ep;
854 struct mv_u3d_req *req;
855 struct mv_u3d *u3d;
856 struct mv_u3d_ep_context *ep_context;
857 struct mv_u3d_req *next_req;
858
859 unsigned long flags;
860 int ret = 0;
861
862 if (!_ep || !_req)
863 return -EINVAL;
864
865 ep = container_of(_ep, struct mv_u3d_ep, ep);
866 u3d = ep->u3d;
867
868 spin_lock_irqsave(&ep->u3d->lock, flags);
869
870 /* make sure it's actually queued on this endpoint */
871 list_for_each_entry(req, &ep->queue, queue) {
872 if (&req->req == _req)
873 break;
874 }
875 if (&req->req != _req) {
876 ret = -EINVAL;
877 goto out;
878 }
879
880 /* The request is in progress, or completed but not dequeued */
881 if (ep->queue.next == &req->queue) {
882 _req->status = -ECONNRESET;
883 mv_u3d_ep_fifo_flush(_ep);
884
885 /* The request isn't the last request in this ep queue */
886 if (req->queue.next != &ep->queue) {
887 dev_dbg(u3d->dev,
888 "it is the last request in this ep queue\n");
889 ep_context = ep->ep_context;
890 next_req = list_entry(req->queue.next,
891 struct mv_u3d_req, queue);
892
893 /* Point first TRB of next request to the EP context. */
894 iowrite32((unsigned long) next_req->trb_head,
895 &ep_context->trb_addr_lo);
896 } else {
897 struct mv_u3d_ep_context *ep_context;
898 ep_context = ep->ep_context;
899 ep_context->trb_addr_lo = 0;
900 ep_context->trb_addr_hi = 0;
901 }
902
903 } else
904 WARN_ON(1);
905
906 mv_u3d_done(ep, req, -ECONNRESET);
907
908 /* remove the req from the ep req list */
909 if (!list_empty(&ep->req_list)) {
910 struct mv_u3d_req *curr_req;
911 curr_req = list_entry(ep->req_list.next,
912 struct mv_u3d_req, list);
913 if (curr_req == req) {
914 list_del_init(&req->list);
915 ep->processing = 0;
916 }
917 }
918
919 out:
920 spin_unlock_irqrestore(&ep->u3d->lock, flags);
921 return ret;
922 }
923
924 static void
925 mv_u3d_ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall)
926 {
927 u32 tmp;
928 struct mv_u3d_ep *ep = u3d->eps;
929
930 dev_dbg(u3d->dev, "%s\n", __func__);
931 if (direction == MV_U3D_EP_DIR_OUT) {
932 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
933 if (stall)
934 tmp |= MV_U3D_EPXCR_EP_HALT;
935 else
936 tmp &= ~MV_U3D_EPXCR_EP_HALT;
937 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
938 } else {
939 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
940 if (stall)
941 tmp |= MV_U3D_EPXCR_EP_HALT;
942 else
943 tmp &= ~MV_U3D_EPXCR_EP_HALT;
944 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
945 }
946 }
947
948 static int mv_u3d_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
949 {
950 struct mv_u3d_ep *ep;
951 unsigned long flags = 0;
952 int status = 0;
953 struct mv_u3d *u3d;
954
955 ep = container_of(_ep, struct mv_u3d_ep, ep);
956 u3d = ep->u3d;
957 if (!ep->ep.desc) {
958 status = -EINVAL;
959 goto out;
960 }
961
962 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
963 status = -EOPNOTSUPP;
964 goto out;
965 }
966
967 /*
968 * Attempting to halt an IN ep will fail if any transfer requests
969 * are still queued
970 */
971 if (halt && (mv_u3d_ep_dir(ep) == MV_U3D_EP_DIR_IN)
972 && !list_empty(&ep->queue)) {
973 status = -EAGAIN;
974 goto out;
975 }
976
977 spin_lock_irqsave(&ep->u3d->lock, flags);
978 mv_u3d_ep_set_stall(u3d, ep->ep_num, mv_u3d_ep_dir(ep), halt);
979 if (halt && wedge)
980 ep->wedge = 1;
981 else if (!halt)
982 ep->wedge = 0;
983 spin_unlock_irqrestore(&ep->u3d->lock, flags);
984
985 if (ep->ep_num == 0)
986 u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
987 out:
988 return status;
989 }
990
991 static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)
992 {
993 return mv_u3d_ep_set_halt_wedge(_ep, halt, 0);
994 }
995
996 static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)
997 {
998 return mv_u3d_ep_set_halt_wedge(_ep, 1, 1);
999 }
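Halt and wedge differ only in whether the host can later clear the stall with CLEAR_FEATURE(ENDPOINT_HALT); a wedged endpoint stays stalled until it is disabled and re-enabled. A hedged sketch of the function-driver side, using the standard usb_ep wrappers (the helper name is hypothetical):

/* Sketch only: stalling an endpoint from a gadget function driver. */
static int stall_ep(struct usb_ep *ep, bool permanent)
{
	/* usb_ep_set_wedge() keeps the stall until the ep is re-enabled;
	 * usb_ep_set_halt() may be cleared by the host via CLEAR_FEATURE. */
	return permanent ? usb_ep_set_wedge(ep) : usb_ep_set_halt(ep);
}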
1000
1001 static struct usb_ep_ops mv_u3d_ep_ops = {
1002 .enable = mv_u3d_ep_enable,
1003 .disable = mv_u3d_ep_disable,
1004
1005 .alloc_request = mv_u3d_alloc_request,
1006 .free_request = mv_u3d_free_request,
1007
1008 .queue = mv_u3d_ep_queue,
1009 .dequeue = mv_u3d_ep_dequeue,
1010
1011 .set_wedge = mv_u3d_ep_set_wedge,
1012 .set_halt = mv_u3d_ep_set_halt,
1013 .fifo_flush = mv_u3d_ep_fifo_flush,
1014 };
1015
1016 static void mv_u3d_controller_stop(struct mv_u3d *u3d)
1017 {
1018 u32 tmp;
1019
1020 if (!u3d->clock_gating && u3d->vbus_valid_detect)
1021 iowrite32(MV_U3D_INTR_ENABLE_VBUS_VALID,
1022 &u3d->vuc_regs->intrenable);
1023 else
1024 iowrite32(0, &u3d->vuc_regs->intrenable);
1025 iowrite32(~0x0, &u3d->vuc_regs->endcomplete);
1026 iowrite32(~0x0, &u3d->vuc_regs->trbunderrun);
1027 iowrite32(~0x0, &u3d->vuc_regs->trbcomplete);
1028 iowrite32(~0x0, &u3d->vuc_regs->linkchange);
1029 iowrite32(0x1, &u3d->vuc_regs->setuplock);
1030
1031 /* Reset the RUN bit in the command register to stop USB */
1032 tmp = ioread32(&u3d->op_regs->usbcmd);
1033 tmp &= ~MV_U3D_CMD_RUN_STOP;
1034 iowrite32(tmp, &u3d->op_regs->usbcmd);
1035 dev_dbg(u3d->dev, "after u3d_stop, USBCMD 0x%x\n",
1036 ioread32(&u3d->op_regs->usbcmd));
1037 }
1038
1039 static void mv_u3d_controller_start(struct mv_u3d *u3d)
1040 {
1041 u32 usbintr;
1042 u32 temp;
1043
1044 /* enable link LTSSM state machine */
1045 temp = ioread32(&u3d->vuc_regs->ltssm);
1046 temp |= MV_U3D_LTSSM_PHY_INIT_DONE;
1047 iowrite32(temp, &u3d->vuc_regs->ltssm);
1048
1049 /* Enable interrupts */
1050 usbintr = MV_U3D_INTR_ENABLE_LINK_CHG | MV_U3D_INTR_ENABLE_TXDESC_ERR |
1051 MV_U3D_INTR_ENABLE_RXDESC_ERR | MV_U3D_INTR_ENABLE_TX_COMPLETE |
1052 MV_U3D_INTR_ENABLE_RX_COMPLETE | MV_U3D_INTR_ENABLE_SETUP |
1053 (u3d->vbus_valid_detect ? MV_U3D_INTR_ENABLE_VBUS_VALID : 0);
1054 iowrite32(usbintr, &u3d->vuc_regs->intrenable);
1055
1056 /* Enable ctrl ep */
1057 iowrite32(0x1, &u3d->vuc_regs->ctrlepenable);
1058
1059 /* Set the Run bit in the command register */
1060 iowrite32(MV_U3D_CMD_RUN_STOP, &u3d->op_regs->usbcmd);
1061 dev_dbg(u3d->dev, "after u3d_start, USBCMD 0x%x\n",
1062 ioread32(&u3d->op_regs->usbcmd));
1063 }
1064
1065 static int mv_u3d_controller_reset(struct mv_u3d *u3d)
1066 {
1067 unsigned int loops;
1068 u32 tmp;
1069
1070 /* Stop the controller */
1071 tmp = ioread32(&u3d->op_regs->usbcmd);
1072 tmp &= ~MV_U3D_CMD_RUN_STOP;
1073 iowrite32(tmp, &u3d->op_regs->usbcmd);
1074
1075 /* Reset the controller to get default values */
1076 iowrite32(MV_U3D_CMD_CTRL_RESET, &u3d->op_regs->usbcmd);
1077
1078 /* wait for reset to complete */
1079 loops = LOOPS(MV_U3D_RESET_TIMEOUT);
1080 while (ioread32(&u3d->op_regs->usbcmd) & MV_U3D_CMD_CTRL_RESET) {
1081 if (loops == 0) {
1082 dev_err(u3d->dev,
1083 "Wait for RESET completed TIMEOUT\n");
1084 return -ETIMEDOUT;
1085 }
1086 loops--;
1087 udelay(LOOPS_USEC);
1088 }
1089
1090 /* Configure the Endpoint Context Address */
1091 iowrite32(u3d->ep_context_dma, &u3d->op_regs->dcbaapl);
1092 iowrite32(0, &u3d->op_regs->dcbaaph);
1093
1094 return 0;
1095 }
1096
1097 static int mv_u3d_enable(struct mv_u3d *u3d)
1098 {
1099 struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
1100 int retval;
1101
1102 if (u3d->active)
1103 return 0;
1104
1105 if (!u3d->clock_gating) {
1106 u3d->active = 1;
1107 return 0;
1108 }
1109
1110 dev_dbg(u3d->dev, "enable u3d\n");
1111 clk_enable(u3d->clk);
1112 if (pdata->phy_init) {
1113 retval = pdata->phy_init(u3d->phy_regs);
1114 if (retval) {
1115 dev_err(u3d->dev,
1116 "init phy error %d\n", retval);
1117 clk_disable(u3d->clk);
1118 return retval;
1119 }
1120 }
1121 u3d->active = 1;
1122
1123 return 0;
1124 }
1125
1126 static void mv_u3d_disable(struct mv_u3d *u3d)
1127 {
1128 struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
1129 if (u3d->clock_gating && u3d->active) {
1130 dev_dbg(u3d->dev, "disable u3d\n");
1131 if (pdata->phy_deinit)
1132 pdata->phy_deinit(u3d->phy_regs);
1133 clk_disable(u3d->clk);
1134 u3d->active = 0;
1135 }
1136 }
1137
1138 static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)
1139 {
1140 struct mv_u3d *u3d;
1141 unsigned long flags;
1142 int retval = 0;
1143
1144 u3d = container_of(gadget, struct mv_u3d, gadget);
1145
1146 spin_lock_irqsave(&u3d->lock, flags);
1147
1148 u3d->vbus_active = (is_active != 0);
1149 dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
1150 __func__, u3d->softconnect, u3d->vbus_active);
1151 /*
1152 * 1. external VBUS detect: we can disable/enable clock on demand.
1153 * 2. UDC VBUS detect: we have to enable clock all the time.
1154 * 3. No VBUS detect: we have to enable clock all the time.
1155 */
1156 if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
1157 retval = mv_u3d_enable(u3d);
1158 if (retval == 0) {
1159 /*
1160 * after the clock is disabled, we lose all register
1161 * context, so we have to re-init the registers
1162 */
1163 mv_u3d_controller_reset(u3d);
1164 mv_u3d_ep0_reset(u3d);
1165 mv_u3d_controller_start(u3d);
1166 }
1167 } else if (u3d->driver && u3d->softconnect) {
1168 if (!u3d->active)
1169 goto out;
1170
1171 /* stop all the transfers in the queue */
1172 mv_u3d_stop_activity(u3d, u3d->driver);
1173 mv_u3d_controller_stop(u3d);
1174 mv_u3d_disable(u3d);
1175 }
1176
1177 out:
1178 spin_unlock_irqrestore(&u3d->lock, flags);
1179 return retval;
1180 }
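The three cases in the comment above map onto who drives this callback: with external VBUS detection, a transceiver or PMIC driver reports cable events through the standard wrappers, which land in ->vbus_session. A hedged sketch (the event-handler name is hypothetical):

/* Sketch only: an external VBUS detector driving ->vbus_session. */
static void my_vbus_event(struct usb_gadget *gadget, bool plugged)
{
	if (plugged)
		usb_gadget_vbus_connect(gadget);	/* is_active = 1 */
	else
		usb_gadget_vbus_disconnect(gadget);	/* is_active = 0 */
}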
1181
1182 /* constrain controller's VBUS power usage
1183 * This call is used by gadget drivers during SET_CONFIGURATION calls,
1184 * reporting how much power the device may consume. For example, this
1185 * could affect how quickly batteries are recharged.
1186 *
1187 * Returns zero on success, else negative errno.
1188 */
1189 static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1190 {
1191 struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
1192
1193 u3d->power = mA;
1194
1195 return 0;
1196 }
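As the comment block above notes, this hook is reached when a gadget driver reports the power budget of the chosen configuration; the standard wrapper is usb_gadget_vbus_draw(). A hedged sketch (helper name hypothetical):

/* Sketch only: reporting the power budget chosen at SET_CONFIGURATION. */
static int report_power(struct usb_gadget *gadget, unsigned int mA)
{
	return usb_gadget_vbus_draw(gadget, mA);	/* ends up in mv_u3d_vbus_draw() */
}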
1197
1198 static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)
1199 {
1200 struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
1201 unsigned long flags;
1202 int retval = 0;
1203
1204 spin_lock_irqsave(&u3d->lock, flags);
1205
1206 dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
1207 __func__, u3d->softconnect, u3d->vbus_active);
1208 u3d->softconnect = (is_on != 0);
1209 if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
1210 retval = mv_u3d_enable(u3d);
1211 if (retval == 0) {
1212 /*
1213 * after the clock is disabled, we lose all register
1214 * context, so we have to re-init the registers
1215 */
1216 mv_u3d_controller_reset(u3d);
1217 mv_u3d_ep0_reset(u3d);
1218 mv_u3d_controller_start(u3d);
1219 }
1220 } else if (u3d->driver && u3d->vbus_active) {
1221 /* stop all the transfers in the queue */
1222 mv_u3d_stop_activity(u3d, u3d->driver);
1223 mv_u3d_controller_stop(u3d);
1224 mv_u3d_disable(u3d);
1225 }
1226
1227 spin_unlock_irqrestore(&u3d->lock, flags);
1228
1229 return retval;
1230 }
1231
1232 static int mv_u3d_start(struct usb_gadget *g,
1233 struct usb_gadget_driver *driver)
1234 {
1235 struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
1236 struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
1237 unsigned long flags;
1238
1239 if (u3d->driver)
1240 return -EBUSY;
1241
1242 spin_lock_irqsave(&u3d->lock, flags);
1243
1244 if (!u3d->clock_gating) {
1245 clk_enable(u3d->clk);
1246 if (pdata->phy_init)
1247 pdata->phy_init(u3d->phy_regs);
1248 }
1249
1250 /* hook up the driver ... */
1251 driver->driver.bus = NULL;
1252 u3d->driver = driver;
1253
1254 u3d->ep0_dir = USB_DIR_OUT;
1255
1256 spin_unlock_irqrestore(&u3d->lock, flags);
1257
1258 u3d->vbus_valid_detect = 1;
1259
1260 return 0;
1261 }
1262
1263 static int mv_u3d_stop(struct usb_gadget *g)
1264 {
1265 struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
1266 struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
1267 unsigned long flags;
1268
1269 u3d->vbus_valid_detect = 0;
1270 spin_lock_irqsave(&u3d->lock, flags);
1271
1272 /* enable clock to access controller register */
1273 clk_enable(u3d->clk);
1274 if (pdata->phy_init)
1275 pdata->phy_init(u3d->phy_regs);
1276
1277 mv_u3d_controller_stop(u3d);
1278 /* stop all usb activities */
1279 u3d->gadget.speed = USB_SPEED_UNKNOWN;
1280 mv_u3d_stop_activity(u3d, NULL);
1281 mv_u3d_disable(u3d);
1282
1283 if (pdata->phy_deinit)
1284 pdata->phy_deinit(u3d->phy_regs);
1285 clk_disable(u3d->clk);
1286
1287 spin_unlock_irqrestore(&u3d->lock, flags);
1288
1289 u3d->driver = NULL;
1290
1291 return 0;
1292 }
1293
1294 /* device controller usb_gadget_ops structure */
1295 static const struct usb_gadget_ops mv_u3d_ops = {
1296 /* notify controller that VBUS is powered or not */
1297 .vbus_session = mv_u3d_vbus_session,
1298
1299 /* constrain controller's VBUS power usage */
1300 .vbus_draw = mv_u3d_vbus_draw,
1301
1302 .pullup = mv_u3d_pullup,
1303 .udc_start = mv_u3d_start,
1304 .udc_stop = mv_u3d_stop,
1305 };
1306
1307 static int mv_u3d_eps_init(struct mv_u3d *u3d)
1308 {
1309 struct mv_u3d_ep *ep;
1310 char name[14];
1311 int i;
1312
1313 /* initialize ep0, ep0 in/out use eps[1] */
1314 ep = &u3d->eps[1];
1315 ep->u3d = u3d;
1316 strncpy(ep->name, "ep0", sizeof(ep->name));
1317 ep->ep.name = ep->name;
1318 ep->ep.ops = &mv_u3d_ep_ops;
1319 ep->wedge = 0;
1320 usb_ep_set_maxpacket_limit(&ep->ep, MV_U3D_EP0_MAX_PKT_SIZE);
1321 ep->ep.caps.type_control = true;
1322 ep->ep.caps.dir_in = true;
1323 ep->ep.caps.dir_out = true;
1324 ep->ep_num = 0;
1325 ep->ep.desc = &mv_u3d_ep0_desc;
1326 INIT_LIST_HEAD(&ep->queue);
1327 INIT_LIST_HEAD(&ep->req_list);
1328 ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1329
1330 /* add ep0 ep_context */
1331 ep->ep_context = &u3d->ep_context[1];
1332
1333 /* initialize other endpoints */
1334 for (i = 2; i < u3d->max_eps * 2; i++) {
1335 ep = &u3d->eps[i];
1336 if (i & 1) {
1337 snprintf(name, sizeof(name), "ep%din", i >> 1);
1338 ep->direction = MV_U3D_EP_DIR_IN;
1339 ep->ep.caps.dir_in = true;
1340 } else {
1341 snprintf(name, sizeof(name), "ep%dout", i >> 1);
1342 ep->direction = MV_U3D_EP_DIR_OUT;
1343 ep->ep.caps.dir_out = true;
1344 }
1345 ep->u3d = u3d;
1346 strncpy(ep->name, name, sizeof(ep->name));
1347 ep->ep.name = ep->name;
1348
1349 ep->ep.caps.type_iso = true;
1350 ep->ep.caps.type_bulk = true;
1351 ep->ep.caps.type_int = true;
1352
1353 ep->ep.ops = &mv_u3d_ep_ops;
1354 usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
1355 ep->ep_num = i / 2;
1356
1357 INIT_LIST_HEAD(&ep->queue);
1358 list_add_tail(&ep->ep.ep_list, &u3d->gadget.ep_list);
1359
1360 INIT_LIST_HEAD(&ep->req_list);
1361 spin_lock_init(&ep->req_lock);
1362 ep->ep_context = &u3d->ep_context[i];
1363 }
1364
1365 return 0;
1366 }
1367
1368 /* delete all endpoint requests, called with spinlock held */
1369 static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status)
1370 {
1371 /* endpoint fifo flush */
1372 mv_u3d_ep_fifo_flush(&ep->ep);
1373
1374 while (!list_empty(&ep->queue)) {
1375 struct mv_u3d_req *req = NULL;
1376 req = list_entry(ep->queue.next, struct mv_u3d_req, queue);
1377 mv_u3d_done(ep, req, status);
1378 }
1379 }
1380
1381 /* stop all USB activities */
1382 static
1383 void mv_u3d_stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver)
1384 {
1385 struct mv_u3d_ep *ep;
1386
1387 mv_u3d_nuke(&u3d->eps[1], -ESHUTDOWN);
1388
1389 list_for_each_entry(ep, &u3d->gadget.ep_list, ep.ep_list) {
1390 mv_u3d_nuke(ep, -ESHUTDOWN);
1391 }
1392
1393 /* report disconnect; the driver is already quiesced */
1394 if (driver) {
1395 spin_unlock(&u3d->lock);
1396 driver->disconnect(&u3d->gadget);
1397 spin_lock(&u3d->lock);
1398 }
1399 }
1400
1401 static void mv_u3d_irq_process_error(struct mv_u3d *u3d)
1402 {
1403 /* Increment the error count */
1404 u3d->errors++;
1405 dev_err(u3d->dev, "%s\n", __func__);
1406 }
1407
1408 static void mv_u3d_irq_process_link_change(struct mv_u3d *u3d)
1409 {
1410 u32 linkchange;
1411
1412 linkchange = ioread32(&u3d->vuc_regs->linkchange);
1413 iowrite32(linkchange, &u3d->vuc_regs->linkchange);
1414
1415 dev_dbg(u3d->dev, "linkchange: 0x%x\n", linkchange);
1416
1417 if (linkchange & MV_U3D_LINK_CHANGE_LINK_UP) {
1418 dev_dbg(u3d->dev, "link up: ltssm state: 0x%x\n",
1419 ioread32(&u3d->vuc_regs->ltssmstate));
1420
1421 u3d->usb_state = USB_STATE_DEFAULT;
1422 u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
1423 u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
1424
1425 /* set speed */
1426 u3d->gadget.speed = USB_SPEED_SUPER;
1427 }
1428
1429 if (linkchange & MV_U3D_LINK_CHANGE_SUSPEND) {
1430 dev_dbg(u3d->dev, "link suspend\n");
1431 u3d->resume_state = u3d->usb_state;
1432 u3d->usb_state = USB_STATE_SUSPENDED;
1433 }
1434
1435 if (linkchange & MV_U3D_LINK_CHANGE_RESUME) {
1436 dev_dbg(u3d->dev, "link resume\n");
1437 u3d->usb_state = u3d->resume_state;
1438 u3d->resume_state = 0;
1439 }
1440
1441 if (linkchange & MV_U3D_LINK_CHANGE_WRESET) {
1442 dev_dbg(u3d->dev, "warm reset\n");
1443 u3d->usb_state = USB_STATE_POWERED;
1444 }
1445
1446 if (linkchange & MV_U3D_LINK_CHANGE_HRESET) {
1447 dev_dbg(u3d->dev, "hot reset\n");
1448 u3d->usb_state = USB_STATE_DEFAULT;
1449 }
1450
1451 if (linkchange & MV_U3D_LINK_CHANGE_INACT)
1452 dev_dbg(u3d->dev, "inactive\n");
1453
1454 if (linkchange & MV_U3D_LINK_CHANGE_DISABLE_AFTER_U0)
1455 dev_dbg(u3d->dev, "ss.disabled\n");
1456
1457 if (linkchange & MV_U3D_LINK_CHANGE_VBUS_INVALID) {
1458 dev_dbg(u3d->dev, "vbus invalid\n");
1459 u3d->usb_state = USB_STATE_ATTACHED;
1460 u3d->vbus_valid_detect = 1;
1461 /* if external vbus detect is not supported,
1462 * we handle it here.
1463 */
1464 if (!u3d->vbus) {
1465 spin_unlock(&u3d->lock);
1466 mv_u3d_vbus_session(&u3d->gadget, 0);
1467 spin_lock(&u3d->lock);
1468 }
1469 }
1470 }
1471
1472 static void mv_u3d_ch9setaddress(struct mv_u3d *u3d,
1473 struct usb_ctrlrequest *setup)
1474 {
1475 u32 tmp;
1476
1477 if (u3d->usb_state != USB_STATE_DEFAULT) {
1478 dev_err(u3d->dev,
1479 "%s, cannot setaddr in this state (%d)\n",
1480 __func__, u3d->usb_state);
1481 goto err;
1482 }
1483
1484 u3d->dev_addr = (u8)setup->wValue;
1485
1486 dev_dbg(u3d->dev, "%s: 0x%x\n", __func__, u3d->dev_addr);
1487
1488 if (u3d->dev_addr > 127) {
1489 dev_err(u3d->dev,
1490 "%s, u3d address is wrong (out of range)\n", __func__);
1491 u3d->dev_addr = 0;
1492 goto err;
1493 }
1494
1495 /* update usb state */
1496 u3d->usb_state = USB_STATE_ADDRESS;
1497
1498 /* set the new address */
1499 tmp = ioread32(&u3d->vuc_regs->devaddrtiebrkr);
1500 tmp &= ~0x7F;
1501 tmp |= (u32)u3d->dev_addr;
1502 iowrite32(tmp, &u3d->vuc_regs->devaddrtiebrkr);
1503
1504 return;
1505 err:
1506 mv_u3d_ep0_stall(u3d);
1507 }
1508
1509 static int mv_u3d_is_set_configuration(struct usb_ctrlrequest *setup)
1510 {
1511 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
1512 if (setup->bRequest == USB_REQ_SET_CONFIGURATION)
1513 return 1;
1514
1515 return 0;
1516 }
1517
1518 static void mv_u3d_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num,
1519 struct usb_ctrlrequest *setup)
1520 __releases(&u3d->lock)
1521 __acquires(&u3d->lock)
1522 {
1523 bool delegate = false;
1524
1525 mv_u3d_nuke(&u3d->eps[ep_num * 2 + MV_U3D_EP_DIR_IN], -ESHUTDOWN);
1526
1527 dev_dbg(u3d->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1528 setup->bRequestType, setup->bRequest,
1529 setup->wValue, setup->wIndex, setup->wLength);
1530
1531 /* We process some standard setup requests here */
1532 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1533 switch (setup->bRequest) {
1534 case USB_REQ_GET_STATUS:
1535 delegate = true;
1536 break;
1537
1538 case USB_REQ_SET_ADDRESS:
1539 mv_u3d_ch9setaddress(u3d, setup);
1540 break;
1541
1542 case USB_REQ_CLEAR_FEATURE:
1543 delegate = true;
1544 break;
1545
1546 case USB_REQ_SET_FEATURE:
1547 delegate = true;
1548 break;
1549
1550 default:
1551 delegate = true;
1552 }
1553 } else
1554 delegate = true;
1555
1556 /* delegate USB standard requests to the gadget driver */
1557 if (delegate == true) {
1558 /* USB requests handled by gadget */
1559 if (setup->wLength) {
1560 /* DATA phase from gadget, STATUS phase from u3d */
1561 u3d->ep0_dir = (setup->bRequestType & USB_DIR_IN)
1562 ? MV_U3D_EP_DIR_IN : MV_U3D_EP_DIR_OUT;
1563 spin_unlock(&u3d->lock);
1564 if (u3d->driver->setup(&u3d->gadget,
1565 &u3d->local_setup_buff) < 0) {
1566 dev_err(u3d->dev, "setup error!\n");
1567 mv_u3d_ep0_stall(u3d);
1568 }
1569 spin_lock(&u3d->lock);
1570 } else {
1571 /* no DATA phase, STATUS phase from gadget */
1572 u3d->ep0_dir = MV_U3D_EP_DIR_IN;
1573 u3d->ep0_state = MV_U3D_STATUS_STAGE;
1574 spin_unlock(&u3d->lock);
1575 if (u3d->driver->setup(&u3d->gadget,
1576 &u3d->local_setup_buff) < 0)
1577 mv_u3d_ep0_stall(u3d);
1578 spin_lock(&u3d->lock);
1579 }
1580
1581 if (mv_u3d_is_set_configuration(setup)) {
1582 dev_dbg(u3d->dev, "u3d configured\n");
1583 u3d->usb_state = USB_STATE_CONFIGURED;
1584 }
1585 }
1586 }
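When a request is delegated, the gadget driver's setup() callback runs with the lock dropped, and a negative return value makes the UDC stall ep0, as above. A hedged sketch of such a callback (names hypothetical, matching the usb_gadget_driver ->setup signature):

/* Sketch only: a gadget driver's setup() handler as invoked above. */
static int my_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	if (ctrl->bRequest == USB_REQ_SET_CONFIGURATION)
		return 0;		/* handled; the UDC runs the status stage */

	return -EOPNOTSUPP;		/* unhandled: the UDC stalls ep0 */
}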
1587
1588 static void mv_u3d_get_setup_data(struct mv_u3d *u3d, u8 ep_num, u8 *buffer_ptr)
1589 {
1590 struct mv_u3d_ep_context *epcontext;
1591
1592 epcontext = &u3d->ep_context[ep_num * 2 + MV_U3D_EP_DIR_IN];
1593
1594 /* Copy the setup packet to local buffer */
1595 memcpy(buffer_ptr, (u8 *) &epcontext->setup_buffer, 8);
1596 }
1597
1598 static void mv_u3d_irq_process_setup(struct mv_u3d *u3d)
1599 {
1600 u32 tmp, i;
1601 /* Process all Setup packet received interrupts */
1602 tmp = ioread32(&u3d->vuc_regs->setuplock);
1603 if (tmp) {
1604 for (i = 0; i < u3d->max_eps; i++) {
1605 if (tmp & (1 << i)) {
1606 mv_u3d_get_setup_data(u3d, i,
1607 (u8 *)(&u3d->local_setup_buff));
1608 mv_u3d_handle_setup_packet(u3d, i,
1609 &u3d->local_setup_buff);
1610 }
1611 }
1612 }
1613
1614 iowrite32(tmp, &u3d->vuc_regs->setuplock);
1615 }
1616
1617 static void mv_u3d_irq_process_tr_complete(struct mv_u3d *u3d)
1618 {
1619 u32 tmp, bit_pos;
1620 int i, ep_num = 0, direction = 0;
1621 struct mv_u3d_ep *curr_ep;
1622 struct mv_u3d_req *curr_req, *temp_req;
1623 int status;
1624
1625 tmp = ioread32(&u3d->vuc_regs->endcomplete);
1626
1627 dev_dbg(u3d->dev, "tr_complete: ep: 0x%x\n", tmp);
1628 if (!tmp)
1629 return;
1630 iowrite32(tmp, &u3d->vuc_regs->endcomplete);
1631
1632 for (i = 0; i < u3d->max_eps * 2; i++) {
1633 ep_num = i >> 1;
1634 direction = i % 2;
1635
1636 bit_pos = 1 << (ep_num + 16 * direction);
1637
1638 if (!(bit_pos & tmp))
1639 continue;
1640
1641 if (i == 0)
1642 curr_ep = &u3d->eps[1];
1643 else
1644 curr_ep = &u3d->eps[i];
1645
1646 /* remove req out of ep request list after completion */
1647 dev_dbg(u3d->dev, "tr comp: check req_list\n");
1648 spin_lock(&curr_ep->req_lock);
1649 if (!list_empty(&curr_ep->req_list)) {
1650 struct mv_u3d_req *req;
1651 req = list_entry(curr_ep->req_list.next,
1652 struct mv_u3d_req, list);
1653 list_del_init(&req->list);
1654 curr_ep->processing = 0;
1655 }
1656 spin_unlock(&curr_ep->req_lock);
1657
1658 /* process the req queue until the first incomplete request */
1659 list_for_each_entry_safe(curr_req, temp_req,
1660 &curr_ep->queue, queue) {
1661 status = mv_u3d_process_ep_req(u3d, i, curr_req);
1662 if (status)
1663 break;
1664 /* write back status to req */
1665 curr_req->req.status = status;
1666
1667 /* ep0 request completion */
1668 if (ep_num == 0) {
1669 mv_u3d_done(curr_ep, curr_req, 0);
1670 break;
1671 } else {
1672 mv_u3d_done(curr_ep, curr_req, status);
1673 }
1674 }
1675
1676 dev_dbg(u3d->dev, "call mv_u3d_start_queue from ep complete\n");
1677 mv_u3d_start_queue(curr_ep);
1678 }
1679 }
1680
1681 static irqreturn_t mv_u3d_irq(int irq, void *dev)
1682 {
1683 struct mv_u3d *u3d = (struct mv_u3d *)dev;
1684 u32 status, intr;
1685 u32 bridgesetting;
1686 u32 trbunderrun;
1687
1688 spin_lock(&u3d->lock);
1689
1690 status = ioread32(&u3d->vuc_regs->intrcause);
1691 intr = ioread32(&u3d->vuc_regs->intrenable);
1692 status &= intr;
1693
1694 if (status == 0) {
1695 spin_unlock(&u3d->lock);
1696 dev_err(u3d->dev, "irq error!\n");
1697 return IRQ_NONE;
1698 }
1699
1700 if (status & MV_U3D_USBINT_VBUS_VALID) {
1701 bridgesetting = ioread32(&u3d->vuc_regs->bridgesetting);
1702 if (bridgesetting & MV_U3D_BRIDGE_SETTING_VBUS_VALID) {
1703 /* write vbus valid bit of bridge setting to clear */
1704 bridgesetting = MV_U3D_BRIDGE_SETTING_VBUS_VALID;
1705 iowrite32(bridgesetting, &u3d->vuc_regs->bridgesetting);
1706 dev_dbg(u3d->dev, "vbus valid\n");
1707
1708 u3d->usb_state = USB_STATE_POWERED;
1709 u3d->vbus_valid_detect = 0;
1710 /* if external vbus detect is not supported,
1711 * we handle it here.
1712 */
1713 if (!u3d->vbus) {
1714 spin_unlock(&u3d->lock);
1715 mv_u3d_vbus_session(&u3d->gadget, 1);
1716 spin_lock(&u3d->lock);
1717 }
1718 } else
1719 dev_err(u3d->dev, "vbus bit is not set\n");
1720 }
1721
1722 /* RX data is already in the 16KB FIFO.*/
1723 if (status & MV_U3D_USBINT_UNDER_RUN) {
1724 trbunderrun = ioread32(&u3d->vuc_regs->trbunderrun);
1725 dev_err(u3d->dev, "under run, ep%d\n", trbunderrun);
1726 iowrite32(trbunderrun, &u3d->vuc_regs->trbunderrun);
1727 mv_u3d_irq_process_error(u3d);
1728 }
1729
1730 if (status & (MV_U3D_USBINT_RXDESC_ERR | MV_U3D_USBINT_TXDESC_ERR)) {
1731 /* write one to clear */
1732 iowrite32(status & (MV_U3D_USBINT_RXDESC_ERR
1733 | MV_U3D_USBINT_TXDESC_ERR),
1734 &u3d->vuc_regs->intrcause);
1735 dev_err(u3d->dev, "desc err 0x%x\n", status);
1736 mv_u3d_irq_process_error(u3d);
1737 }
1738
1739 if (status & MV_U3D_USBINT_LINK_CHG)
1740 mv_u3d_irq_process_link_change(u3d);
1741
1742 if (status & MV_U3D_USBINT_TX_COMPLETE)
1743 mv_u3d_irq_process_tr_complete(u3d);
1744
1745 if (status & MV_U3D_USBINT_RX_COMPLETE)
1746 mv_u3d_irq_process_tr_complete(u3d);
1747
1748 if (status & MV_U3D_USBINT_SETUP)
1749 mv_u3d_irq_process_setup(u3d);
1750
1751 spin_unlock(&u3d->lock);
1752 return IRQ_HANDLED;
1753 }
1754
1755 static int mv_u3d_remove(struct platform_device *dev)
1756 {
1757 struct mv_u3d *u3d = platform_get_drvdata(dev);
1758
1759 BUG_ON(u3d == NULL);
1760
1761 usb_del_gadget_udc(&u3d->gadget);
1762
1763 /* free memory allocated in probe */
1764 dma_pool_destroy(u3d->trb_pool);
1765
1766 if (u3d->ep_context)
1767 dma_free_coherent(&dev->dev, u3d->ep_context_size,
1768 u3d->ep_context, u3d->ep_context_dma);
1769
1770 kfree(u3d->eps);
1771
1772 if (u3d->irq)
1773 free_irq(u3d->irq, u3d);
1774
1775 if (u3d->cap_regs)
1776 iounmap(u3d->cap_regs);
1777 u3d->cap_regs = NULL;
1778
1779 kfree(u3d->status_req);
1780
1781 clk_put(u3d->clk);
1782
1783 kfree(u3d);
1784
1785 return 0;
1786 }
1787
1788 static int mv_u3d_probe(struct platform_device *dev)
1789 {
1790 struct mv_u3d *u3d = NULL;
1791 struct mv_usb_platform_data *pdata = dev_get_platdata(&dev->dev);
1792 int retval = 0;
1793 struct resource *r;
1794 size_t size;
1795
1796 if (!dev_get_platdata(&dev->dev)) {
1797 dev_err(&dev->dev, "missing platform_data\n");
1798 retval = -ENODEV;
1799 goto err_pdata;
1800 }
1801
1802 u3d = kzalloc(sizeof(*u3d), GFP_KERNEL);
1803 if (!u3d) {
1804 retval = -ENOMEM;
1805 goto err_alloc_private;
1806 }
1807
1808 spin_lock_init(&u3d->lock);
1809
1810 platform_set_drvdata(dev, u3d);
1811
1812 u3d->dev = &dev->dev;
1813 u3d->vbus = pdata->vbus;
1814
1815 u3d->clk = clk_get(&dev->dev, NULL);
1816 if (IS_ERR(u3d->clk)) {
1817 retval = PTR_ERR(u3d->clk);
1818 goto err_get_clk;
1819 }
1820
1821 r = platform_get_resource_byname(dev, IORESOURCE_MEM, "capregs");
1822 if (!r) {
1823 dev_err(&dev->dev, "no I/O memory resource defined\n");
1824 retval = -ENODEV;
1825 goto err_get_cap_regs;
1826 }
1827
1828 u3d->cap_regs = (struct mv_u3d_cap_regs __iomem *)
1829 ioremap(r->start, resource_size(r));
1830 if (!u3d->cap_regs) {
1831 dev_err(&dev->dev, "failed to map I/O memory\n");
1832 retval = -EBUSY;
1833 goto err_map_cap_regs;
1834 } else {
1835 dev_dbg(&dev->dev, "cap_regs address: 0x%lx/0x%lx\n",
1836 (unsigned long) r->start,
1837 (unsigned long) u3d->cap_regs);
1838 }
1839
1840 /* we will access controller register, so enable the u3d controller */
1841 clk_enable(u3d->clk);
1842
1843 if (pdata->phy_init) {
1844 retval = pdata->phy_init(u3d->phy_regs);
1845 if (retval) {
1846 dev_err(&dev->dev, "init phy error %d\n", retval);
1847 goto err_u3d_enable;
1848 }
1849 }
1850
1851 u3d->op_regs = (struct mv_u3d_op_regs __iomem *)(u3d->cap_regs
1852 + MV_U3D_USB3_OP_REGS_OFFSET);
1853
1854 u3d->vuc_regs = (struct mv_u3d_vuc_regs __iomem *)(u3d->cap_regs
1855 + ioread32(&u3d->cap_regs->vuoff));
1856
1857 u3d->max_eps = 16;
1858
1859 /*
1860 * Some platforms use usb to download an image and may not disconnect
1861 * the usb gadget before loading the kernel, so stop u3d here first.
1862 */
1863 mv_u3d_controller_stop(u3d);
1864 iowrite32(0xFFFFFFFF, &u3d->vuc_regs->intrcause);
1865
1866 if (pdata->phy_deinit)
1867 pdata->phy_deinit(u3d->phy_regs);
1868 clk_disable(u3d->clk);
1869
1870 size = u3d->max_eps * sizeof(struct mv_u3d_ep_context) * 2;
1871 size = (size + MV_U3D_EP_CONTEXT_ALIGNMENT - 1)
1872 & ~(MV_U3D_EP_CONTEXT_ALIGNMENT - 1);
1873 u3d->ep_context = dma_alloc_coherent(&dev->dev, size,
1874 &u3d->ep_context_dma, GFP_KERNEL);
1875 if (!u3d->ep_context) {
1876 dev_err(&dev->dev, "allocate ep context memory failed\n");
1877 retval = -ENOMEM;
1878 goto err_alloc_ep_context;
1879 }
1880 u3d->ep_context_size = size;
1881
1882 /* create TRB dma_pool resource */
1883 u3d->trb_pool = dma_pool_create("u3d_trb",
1884 &dev->dev,
1885 sizeof(struct mv_u3d_trb_hw),
1886 MV_U3D_TRB_ALIGNMENT,
1887 MV_U3D_DMA_BOUNDARY);
1888
1889 if (!u3d->trb_pool) {
1890 retval = -ENOMEM;
1891 goto err_alloc_trb_pool;
1892 }
1893
1894 size = u3d->max_eps * sizeof(struct mv_u3d_ep) * 2;
1895 u3d->eps = kzalloc(size, GFP_KERNEL);
1896 if (!u3d->eps) {
1897 retval = -ENOMEM;
1898 goto err_alloc_eps;
1899 }
1900
1901 /* initialize ep0 status request structure */
1902 u3d->status_req = kzalloc(sizeof(struct mv_u3d_req) + 8, GFP_KERNEL);
1903 if (!u3d->status_req) {
1904 retval = -ENOMEM;
1905 goto err_alloc_status_req;
1906 }
1907 INIT_LIST_HEAD(&u3d->status_req->queue);
1908
1909 /* allocate a small amount of memory to get a valid address */
1910 u3d->status_req->req.buf = (char *)u3d->status_req
1911 + sizeof(struct mv_u3d_req);
1912 u3d->status_req->req.dma = virt_to_phys(u3d->status_req->req.buf);
1913
1914 u3d->resume_state = USB_STATE_NOTATTACHED;
1915 u3d->usb_state = USB_STATE_ATTACHED;
1916 u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
1917 u3d->remote_wakeup = 0;
1918
1919 r = platform_get_resource(dev, IORESOURCE_IRQ, 0);
1920 if (!r) {
1921 dev_err(&dev->dev, "no IRQ resource defined\n");
1922 retval = -ENODEV;
1923 goto err_get_irq;
1924 }
1925 u3d->irq = r->start;
1926 if (request_irq(u3d->irq, mv_u3d_irq,
1927 IRQF_SHARED, driver_name, u3d)) {
1928 u3d->irq = 0;
1929 dev_err(&dev->dev, "Request irq %d for u3d failed\n",
1930 u3d->irq);
1931 retval = -ENODEV;
1932 goto err_request_irq;
1933 }
1934
1935 /* initialize gadget structure */
1936 u3d->gadget.ops = &mv_u3d_ops; /* usb_gadget_ops */
1937 u3d->gadget.ep0 = &u3d->eps[1].ep; /* gadget ep0 */
1938 INIT_LIST_HEAD(&u3d->gadget.ep_list); /* ep_list */
1939 u3d->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
1940
1941 /* the "gadget" abstracts/virtualizes the controller */
1942 u3d->gadget.name = driver_name; /* gadget name */
1943
1944 mv_u3d_eps_init(u3d);
1945
1946 /* external vbus detection */
1947 if (u3d->vbus) {
1948 u3d->clock_gating = 1;
1949 dev_err(&dev->dev, "external vbus detection\n");
1950 }
1951
1952 if (!u3d->clock_gating)
1953 u3d->vbus_active = 1;
1954
1955 /* enable usb3 controller vbus detection */
1956 u3d->vbus_valid_detect = 1;
1957
1958 retval = usb_add_gadget_udc(&dev->dev, &u3d->gadget);
1959 if (retval)
1960 goto err_unregister;
1961
1962 dev_dbg(&dev->dev, "successful probe usb3 device %s clock gating.\n",
1963 u3d->clock_gating ? "with" : "without");
1964
1965 return 0;
1966
1967 err_unregister:
1968 free_irq(u3d->irq, u3d);
1969 err_request_irq:
1970 err_get_irq:
1971 kfree(u3d->status_req);
1972 err_alloc_status_req:
1973 kfree(u3d->eps);
1974 err_alloc_eps:
1975 dma_pool_destroy(u3d->trb_pool);
1976 err_alloc_trb_pool:
1977 dma_free_coherent(&dev->dev, u3d->ep_context_size,
1978 u3d->ep_context, u3d->ep_context_dma);
1979 err_alloc_ep_context:
1980 if (pdata->phy_deinit)
1981 pdata->phy_deinit(u3d->phy_regs);
1982 clk_disable(u3d->clk);
1983 err_u3d_enable:
1984 iounmap(u3d->cap_regs);
1985 err_map_cap_regs:
1986 err_get_cap_regs:
1987 err_get_clk:
1988 clk_put(u3d->clk);
1989 kfree(u3d);
1990 err_alloc_private:
1991 err_pdata:
1992 return retval;
1993 }
1994
1995 #ifdef CONFIG_PM_SLEEP
1996 static int mv_u3d_suspend(struct device *dev)
1997 {
1998 struct mv_u3d *u3d = dev_get_drvdata(dev);
1999
2000 /*
2001 * usb can suspend only when the cable is unplugged.
2002 * So do not care about clock_gating == 1; that case is handled by
2003 * the vbus session.
2004 */
2005 if (!u3d->clock_gating) {
2006 mv_u3d_controller_stop(u3d);
2007
2008 spin_lock_irq(&u3d->lock);
2009 /* stop all usb activities */
2010 mv_u3d_stop_activity(u3d, u3d->driver);
2011 spin_unlock_irq(&u3d->lock);
2012
2013 mv_u3d_disable(u3d);
2014 }
2015
2016 return 0;
2017 }
2018
2019 static int mv_u3d_resume(struct device *dev)
2020 {
2021 struct mv_u3d *u3d = dev_get_drvdata(dev);
2022 int retval;
2023
2024 if (!u3d->clock_gating) {
2025 retval = mv_u3d_enable(u3d);
2026 if (retval)
2027 return retval;
2028
2029 if (u3d->driver && u3d->softconnect) {
2030 mv_u3d_controller_reset(u3d);
2031 mv_u3d_ep0_reset(u3d);
2032 mv_u3d_controller_start(u3d);
2033 }
2034 }
2035
2036 return 0;
2037 }
2038 #endif
2039
2040 static SIMPLE_DEV_PM_OPS(mv_u3d_pm_ops, mv_u3d_suspend, mv_u3d_resume);
2041
2042 static void mv_u3d_shutdown(struct platform_device *dev)
2043 {
2044 struct mv_u3d *u3d = platform_get_drvdata(dev);
2045 u32 tmp;
2046
2047 tmp = ioread32(&u3d->op_regs->usbcmd);
2048 tmp &= ~MV_U3D_CMD_RUN_STOP;
2049 iowrite32(tmp, &u3d->op_regs->usbcmd);
2050 }
2051
2052 static struct platform_driver mv_u3d_driver = {
2053 .probe = mv_u3d_probe,
2054 .remove = mv_u3d_remove,
2055 .shutdown = mv_u3d_shutdown,
2056 .driver = {
2057 .name = "mv-u3d",
2058 .pm = &mv_u3d_pm_ops,
2059 },
2060 };
2061
2062 module_platform_driver(mv_u3d_driver);
2063 MODULE_ALIAS("platform:mv-u3d");
2064 MODULE_DESCRIPTION(DRIVER_DESC);
2065 MODULE_AUTHOR("Yu Xu <yuxu@marvell.com>");
2066 MODULE_LICENSE("GPL");
2067
2068
2069
2070
2071
2072 /* LDV_COMMENT_BEGIN_MAIN */
2073 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
2074
2075 /*###########################################################################*/
2076
2077 /*############## Driver Environment Generator 0.2 output ####################*/
2078
2079 /*###########################################################################*/
2080
2081
2082
2083 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test whether all kernel resources are correctly released by the driver before the driver is unloaded. */
2084 void ldv_check_final_state(void);
2085
2086 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
2087 void ldv_check_return_value(int res);
2088
2089 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
2090 void ldv_check_return_value_probe(int res);
2091
2092 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
2093 void ldv_initialize(void);
2094
2095 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
2096 void ldv_handler_precall(void);
2097
2099 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
2099 int nondet_int(void);
2100
2101 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
2102 int LDV_IN_INTERRUPT;
2103
2104 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
2105 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
2106
2107
2108
2109 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
2110 /*============================= VARIABLE DECLARATION PART =============================*/
2111 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2112 /* content: static int mv_u3d_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)*/
2113 /* LDV_COMMENT_BEGIN_PREP */
2114 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2115 /* LDV_COMMENT_END_PREP */
2116 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_enable" */
2117 struct usb_ep * var_group1;
2118 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_enable" */
2119 const struct usb_endpoint_descriptor * var_mv_u3d_ep_enable_8_p1;
2120 /* LDV_COMMENT_BEGIN_PREP */
2121 #ifdef CONFIG_PM_SLEEP
2122 #endif
2123 /* LDV_COMMENT_END_PREP */
2124 /* content: static int mv_u3d_ep_disable(struct usb_ep *_ep)*/
2125 /* LDV_COMMENT_BEGIN_PREP */
2126 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2127 /* LDV_COMMENT_END_PREP */
2128 /* LDV_COMMENT_BEGIN_PREP */
2129 #ifdef CONFIG_PM_SLEEP
2130 #endif
2131 /* LDV_COMMENT_END_PREP */
2132 /* content: static struct usb_request * mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)*/
2133 /* LDV_COMMENT_BEGIN_PREP */
2134 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2135 /* LDV_COMMENT_END_PREP */
2136 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_alloc_request" */
2137 gfp_t var_mv_u3d_alloc_request_10_p1;
2138 /* LDV_COMMENT_BEGIN_PREP */
2139 #ifdef CONFIG_PM_SLEEP
2140 #endif
2141 /* LDV_COMMENT_END_PREP */
2142 /* content: static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)*/
2143 /* LDV_COMMENT_BEGIN_PREP */
2144 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2145 /* LDV_COMMENT_END_PREP */
2146 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_free_request" */
2147 struct usb_request * var_group2;
2148 /* LDV_COMMENT_BEGIN_PREP */
2149 #ifdef CONFIG_PM_SLEEP
2150 #endif
2151 /* LDV_COMMENT_END_PREP */
2152 /* content: static int mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)*/
2153 /* LDV_COMMENT_BEGIN_PREP */
2154 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2155 /* LDV_COMMENT_END_PREP */
2156 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_queue" */
2157 gfp_t var_mv_u3d_ep_queue_13_p2;
2158 /* LDV_COMMENT_BEGIN_PREP */
2159 #ifdef CONFIG_PM_SLEEP
2160 #endif
2161 /* LDV_COMMENT_END_PREP */
2162 /* content: static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)*/
2163 /* LDV_COMMENT_BEGIN_PREP */
2164 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2165 /* LDV_COMMENT_END_PREP */
2166 /* LDV_COMMENT_BEGIN_PREP */
2167 #ifdef CONFIG_PM_SLEEP
2168 #endif
2169 /* LDV_COMMENT_END_PREP */
2170 /* content: static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)*/
2171 /* LDV_COMMENT_BEGIN_PREP */
2172 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2173 /* LDV_COMMENT_END_PREP */
2174 /* LDV_COMMENT_BEGIN_PREP */
2175 #ifdef CONFIG_PM_SLEEP
2176 #endif
2177 /* LDV_COMMENT_END_PREP */
2178 /* content: static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)*/
2179 /* LDV_COMMENT_BEGIN_PREP */
2180 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2181 /* LDV_COMMENT_END_PREP */
2182 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_ep_set_halt" */
2183 int var_mv_u3d_ep_set_halt_17_p1;
2184 /* LDV_COMMENT_BEGIN_PREP */
2185 #ifdef CONFIG_PM_SLEEP
2186 #endif
2187 /* LDV_COMMENT_END_PREP */
2188 /* content: static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)*/
2189 /* LDV_COMMENT_BEGIN_PREP */
2190 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2191 /* LDV_COMMENT_END_PREP */
2192 /* LDV_COMMENT_BEGIN_PREP */
2193 #ifdef CONFIG_PM_SLEEP
2194 #endif
2195 /* LDV_COMMENT_END_PREP */
2196
2197 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
2198 /* content: static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)*/
2199 /* LDV_COMMENT_BEGIN_PREP */
2200 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2201 /* LDV_COMMENT_END_PREP */
2202 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_vbus_session" */
2203 struct usb_gadget * var_group3;
2204 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_vbus_session" */
2205 int var_mv_u3d_vbus_session_24_p1;
2206 /* LDV_COMMENT_BEGIN_PREP */
2207 #ifdef CONFIG_PM_SLEEP
2208 #endif
2209 /* LDV_COMMENT_END_PREP */
2210 /* content: static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)*/
2211 /* LDV_COMMENT_BEGIN_PREP */
2212 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2213 /* LDV_COMMENT_END_PREP */
2214 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_vbus_draw" */
2215 unsigned var_mv_u3d_vbus_draw_25_p1;
2216 /* LDV_COMMENT_BEGIN_PREP */
2217 #ifdef CONFIG_PM_SLEEP
2218 #endif
2219 /* LDV_COMMENT_END_PREP */
2220 /* content: static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)*/
2221 /* LDV_COMMENT_BEGIN_PREP */
2222 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2223 /* LDV_COMMENT_END_PREP */
2224 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_pullup" */
2225 int var_mv_u3d_pullup_26_p1;
2226 /* LDV_COMMENT_BEGIN_PREP */
2227 #ifdef CONFIG_PM_SLEEP
2228 #endif
2229 /* LDV_COMMENT_END_PREP */
2230 /* content: static int mv_u3d_start(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
2231 /* LDV_COMMENT_BEGIN_PREP */
2232 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2233 /* LDV_COMMENT_END_PREP */
2234 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_start" */
2235 struct usb_gadget_driver * var_group4;
2236 /* LDV_COMMENT_BEGIN_PREP */
2237 #ifdef CONFIG_PM_SLEEP
2238 #endif
2239 /* LDV_COMMENT_END_PREP */
2240 /* content: static int mv_u3d_stop(struct usb_gadget *g)*/
2241 /* LDV_COMMENT_BEGIN_PREP */
2242 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2243 /* LDV_COMMENT_END_PREP */
2244 /* LDV_COMMENT_BEGIN_PREP */
2245 #ifdef CONFIG_PM_SLEEP
2246 #endif
2247 /* LDV_COMMENT_END_PREP */
2248
2249 /** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
2250 /* content: static int mv_u3d_probe(struct platform_device *dev)*/
2251 /* LDV_COMMENT_BEGIN_PREP */
2252 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2253 /* LDV_COMMENT_END_PREP */
2254 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_probe" */
2255 struct platform_device * var_group5;
2256 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mv_u3d_probe" */
2257 static int res_mv_u3d_probe_41;
2258 /* LDV_COMMENT_BEGIN_PREP */
2259 #ifdef CONFIG_PM_SLEEP
2260 #endif
2261 /* LDV_COMMENT_END_PREP */
2262 /* content: static int mv_u3d_remove(struct platform_device *dev)*/
2263 /* LDV_COMMENT_BEGIN_PREP */
2264 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2265 /* LDV_COMMENT_END_PREP */
2266 /* LDV_COMMENT_BEGIN_PREP */
2267 #ifdef CONFIG_PM_SLEEP
2268 #endif
2269 /* LDV_COMMENT_END_PREP */
2270 /* content: static void mv_u3d_shutdown(struct platform_device *dev)*/
2271 /* LDV_COMMENT_BEGIN_PREP */
2272 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2273 #ifdef CONFIG_PM_SLEEP
2274 #endif
2275 /* LDV_COMMENT_END_PREP */
2276
2277 /** CALLBACK SECTION request_irq **/
2278 /* content: static irqreturn_t mv_u3d_irq(int irq, void *dev)*/
2279 /* LDV_COMMENT_BEGIN_PREP */
2280 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2281 /* LDV_COMMENT_END_PREP */
2282 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_irq" */
2283 int var_mv_u3d_irq_39_p0;
2284 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mv_u3d_irq" */
2285 void * var_mv_u3d_irq_39_p1;
2286 /* LDV_COMMENT_BEGIN_PREP */
2287 #ifdef CONFIG_PM_SLEEP
2288 #endif
2289 /* LDV_COMMENT_END_PREP */
2290
2291
2292
2293
2294 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
2295 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
2296 /*============================= VARIABLE INITIALIZING PART =============================*/
2297 LDV_IN_INTERRUPT=1;
2298
2299
2300
2301
2302 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
2303 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
2304 /*============================= FUNCTION CALL SECTION =============================*/
2305 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
2306 ldv_initialize();
2307
2308
2309
2310
2311 int ldv_s_mv_u3d_driver_platform_driver = 0;
2312
2313
2314
2315
2316 while( nondet_int()
2317 || !(ldv_s_mv_u3d_driver_platform_driver == 0)
2318 ) {
2319
2320 switch(nondet_int()) {
2321
2322 case 0: {
2323
2324 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2325
2326
2327 /* content: static int mv_u3d_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)*/
2328 /* LDV_COMMENT_BEGIN_PREP */
2329 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2330 /* LDV_COMMENT_END_PREP */
2331 /* LDV_COMMENT_FUNCTION_CALL Function from field "enable" from driver structure with callbacks "mv_u3d_ep_ops" */
2332 ldv_handler_precall();
2333 mv_u3d_ep_enable( var_group1, var_mv_u3d_ep_enable_8_p1);
2334 /* LDV_COMMENT_BEGIN_PREP */
2335 #ifdef CONFIG_PM_SLEEP
2336 #endif
2337 /* LDV_COMMENT_END_PREP */
2338
2339
2340
2341
2342 }
2343
2344 break;
2345 case 1: {
2346
2347 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2348
2349
2350 /* content: static int mv_u3d_ep_disable(struct usb_ep *_ep)*/
2351 /* LDV_COMMENT_BEGIN_PREP */
2352 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2353 /* LDV_COMMENT_END_PREP */
2354 /* LDV_COMMENT_FUNCTION_CALL Function from field "disable" from driver structure with callbacks "mv_u3d_ep_ops" */
2355 ldv_handler_precall();
2356 mv_u3d_ep_disable( var_group1);
2357 /* LDV_COMMENT_BEGIN_PREP */
2358 #ifdef CONFIG_PM_SLEEP
2359 #endif
2360 /* LDV_COMMENT_END_PREP */
2361
2362
2363
2364
2365 }
2366
2367 break;
2368 case 2: {
2369
2370 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2371
2372
2373 /* content: static struct usb_request * mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)*/
2374 /* LDV_COMMENT_BEGIN_PREP */
2375 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2376 /* LDV_COMMENT_END_PREP */
2377 /* LDV_COMMENT_FUNCTION_CALL Function from field "alloc_request" from driver structure with callbacks "mv_u3d_ep_ops" */
2378 ldv_handler_precall();
2379 mv_u3d_alloc_request( var_group1, var_mv_u3d_alloc_request_10_p1);
2380 /* LDV_COMMENT_BEGIN_PREP */
2381 #ifdef CONFIG_PM_SLEEP
2382 #endif
2383 /* LDV_COMMENT_END_PREP */
2384
2385
2386
2387
2388 }
2389
2390 break;
2391 case 3: {
2392
2393 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2394
2395
2396 /* content: static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)*/
2397 /* LDV_COMMENT_BEGIN_PREP */
2398 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2399 /* LDV_COMMENT_END_PREP */
2400 /* LDV_COMMENT_FUNCTION_CALL Function from field "free_request" from driver structure with callbacks "mv_u3d_ep_ops" */
2401 ldv_handler_precall();
2402 mv_u3d_free_request( var_group1, var_group2);
2403 /* LDV_COMMENT_BEGIN_PREP */
2404 #ifdef CONFIG_PM_SLEEP
2405 #endif
2406 /* LDV_COMMENT_END_PREP */
2407
2408
2409
2410
2411 }
2412
2413 break;
2414 case 4: {
2415
2416 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2417
2418
2419 /* content: static int mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)*/
2420 /* LDV_COMMENT_BEGIN_PREP */
2421 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2422 /* LDV_COMMENT_END_PREP */
2423 /* LDV_COMMENT_FUNCTION_CALL Function from field "queue" from driver structure with callbacks "mv_u3d_ep_ops" */
2424 ldv_handler_precall();
2425 mv_u3d_ep_queue( var_group1, var_group2, var_mv_u3d_ep_queue_13_p2);
2426 /* LDV_COMMENT_BEGIN_PREP */
2427 #ifdef CONFIG_PM_SLEEP
2428 #endif
2429 /* LDV_COMMENT_END_PREP */
2430
2431
2432
2433
2434 }
2435
2436 break;
2437 case 5: {
2438
2439 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2440
2441
2442 /* content: static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)*/
2443 /* LDV_COMMENT_BEGIN_PREP */
2444 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2445 /* LDV_COMMENT_END_PREP */
2446 /* LDV_COMMENT_FUNCTION_CALL Function from field "dequeue" from driver structure with callbacks "mv_u3d_ep_ops" */
2447 ldv_handler_precall();
2448 mv_u3d_ep_dequeue( var_group1, var_group2);
2449 /* LDV_COMMENT_BEGIN_PREP */
2450 #ifdef CONFIG_PM_SLEEP
2451 #endif
2452 /* LDV_COMMENT_END_PREP */
2453
2454
2455
2456
2457 }
2458
2459 break;
2460 case 6: {
2461
2462 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2463
2464
2465 /* content: static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)*/
2466 /* LDV_COMMENT_BEGIN_PREP */
2467 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2468 /* LDV_COMMENT_END_PREP */
2469 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_wedge" from driver structure with callbacks "mv_u3d_ep_ops" */
2470 ldv_handler_precall();
2471 mv_u3d_ep_set_wedge( var_group1);
2472 /* LDV_COMMENT_BEGIN_PREP */
2473 #ifdef CONFIG_PM_SLEEP
2474 #endif
2475 /* LDV_COMMENT_END_PREP */
2476
2477
2478
2479
2480 }
2481
2482 break;
2483 case 7: {
2484
2485 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2486
2487
2488 /* content: static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)*/
2489 /* LDV_COMMENT_BEGIN_PREP */
2490 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2491 /* LDV_COMMENT_END_PREP */
2492 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_halt" from driver structure with callbacks "mv_u3d_ep_ops" */
2493 ldv_handler_precall();
2494 mv_u3d_ep_set_halt( var_group1, var_mv_u3d_ep_set_halt_17_p1);
2495 /* LDV_COMMENT_BEGIN_PREP */
2496 #ifdef CONFIG_PM_SLEEP
2497 #endif
2498 /* LDV_COMMENT_END_PREP */
2499
2500
2501
2502
2503 }
2504
2505 break;
2506 case 8: {
2507
2508 /** STRUCT: struct type: usb_ep_ops, struct name: mv_u3d_ep_ops **/
2509
2510
2511 /* content: static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)*/
2512 /* LDV_COMMENT_BEGIN_PREP */
2513 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2514 /* LDV_COMMENT_END_PREP */
2515 /* LDV_COMMENT_FUNCTION_CALL Function from field "fifo_flush" from driver structure with callbacks "mv_u3d_ep_ops" */
2516 ldv_handler_precall();
2517 mv_u3d_ep_fifo_flush( var_group1);
2518 /* LDV_COMMENT_BEGIN_PREP */
2519 #ifdef CONFIG_PM_SLEEP
2520 #endif
2521 /* LDV_COMMENT_END_PREP */
2522
2523
2524
2525
2526 }
2527
2528 break;
2529 case 9: {
2530
2531 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
2532
2533
2534 /* content: static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)*/
2535 /* LDV_COMMENT_BEGIN_PREP */
2536 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2537 /* LDV_COMMENT_END_PREP */
2538 /* LDV_COMMENT_FUNCTION_CALL Function from field "vbus_session" from driver structure with callbacks "mv_u3d_ops" */
2539 ldv_handler_precall();
2540 mv_u3d_vbus_session( var_group3, var_mv_u3d_vbus_session_24_p1);
2541 /* LDV_COMMENT_BEGIN_PREP */
2542 #ifdef CONFIG_PM_SLEEP
2543 #endif
2544 /* LDV_COMMENT_END_PREP */
2545
2546
2547
2548
2549 }
2550
2551 break;
2552 case 10: {
2553
2554 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
2555
2556
2557 /* content: static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)*/
2558 /* LDV_COMMENT_BEGIN_PREP */
2559 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2560 /* LDV_COMMENT_END_PREP */
2561 /* LDV_COMMENT_FUNCTION_CALL Function from field "vbus_draw" from driver structure with callbacks "mv_u3d_ops" */
2562 ldv_handler_precall();
2563 mv_u3d_vbus_draw( var_group3, var_mv_u3d_vbus_draw_25_p1);
2564 /* LDV_COMMENT_BEGIN_PREP */
2565 #ifdef CONFIG_PM_SLEEP
2566 #endif
2567 /* LDV_COMMENT_END_PREP */
2568
2569
2570
2571
2572 }
2573
2574 break;
2575 case 11: {
2576
2577 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
2578
2579
2580 /* content: static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)*/
2581 /* LDV_COMMENT_BEGIN_PREP */
2582 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2583 /* LDV_COMMENT_END_PREP */
2584 /* LDV_COMMENT_FUNCTION_CALL Function from field "pullup" from driver structure with callbacks "mv_u3d_ops" */
2585 ldv_handler_precall();
2586 mv_u3d_pullup( var_group3, var_mv_u3d_pullup_26_p1);
2587 /* LDV_COMMENT_BEGIN_PREP */
2588 #ifdef CONFIG_PM_SLEEP
2589 #endif
2590 /* LDV_COMMENT_END_PREP */
2591
2592
2593
2594
2595 }
2596
2597 break;
2598 case 12: {
2599
2600 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
2601
2602
2603 /* content: static int mv_u3d_start(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
2604 /* LDV_COMMENT_BEGIN_PREP */
2605 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2606 /* LDV_COMMENT_END_PREP */
2607 /* LDV_COMMENT_FUNCTION_CALL Function from field "udc_start" from driver structure with callbacks "mv_u3d_ops" */
2608 ldv_handler_precall();
2609 mv_u3d_start( var_group3, var_group4);
2610 /* LDV_COMMENT_BEGIN_PREP */
2611 #ifdef CONFIG_PM_SLEEP
2612 #endif
2613 /* LDV_COMMENT_END_PREP */
2614
2615
2616
2617
2618 }
2619
2620 break;
2621 case 13: {
2622
2623 /** STRUCT: struct type: usb_gadget_ops, struct name: mv_u3d_ops **/
2624
2625
2626 /* content: static int mv_u3d_stop(struct usb_gadget *g)*/
2627 /* LDV_COMMENT_BEGIN_PREP */
2628 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2629 /* LDV_COMMENT_END_PREP */
2630 /* LDV_COMMENT_FUNCTION_CALL Function from field "udc_stop" from driver structure with callbacks "mv_u3d_ops" */
2631 ldv_handler_precall();
2632 mv_u3d_stop( var_group3);
2633 /* LDV_COMMENT_BEGIN_PREP */
2634 #ifdef CONFIG_PM_SLEEP
2635 #endif
2636 /* LDV_COMMENT_END_PREP */
2637
2638
2639
2640
2641 }
2642
2643 break;
2644 case 14: {
2645
2646 /** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
2647 if(ldv_s_mv_u3d_driver_platform_driver==0) {
2648
2649 /* content: static int mv_u3d_probe(struct platform_device *dev)*/
2650 /* LDV_COMMENT_BEGIN_PREP */
2651 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2652 /* LDV_COMMENT_END_PREP */
2653 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "mv_u3d_driver". Standard function test for correct return result. */
2654 res_mv_u3d_probe_41 = mv_u3d_probe( var_group5);
2655 ldv_check_return_value(res_mv_u3d_probe_41);
2656 ldv_check_return_value_probe(res_mv_u3d_probe_41);
2657 if(res_mv_u3d_probe_41)
2658 goto ldv_module_exit;
2659 /* LDV_COMMENT_BEGIN_PREP */
2660 #ifdef CONFIG_PM_SLEEP
2661 #endif
2662 /* LDV_COMMENT_END_PREP */
2663 ldv_s_mv_u3d_driver_platform_driver++;
2664
2665 }
2666
2667 }
2668
2669 break;
2670 case 15: {
2671
2672 /** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
2673 if(ldv_s_mv_u3d_driver_platform_driver==1) {
2674
2675 /* content: static int mv_u3d_remove(struct platform_device *dev)*/
2676 /* LDV_COMMENT_BEGIN_PREP */
2677 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2678 /* LDV_COMMENT_END_PREP */
2679 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "mv_u3d_driver" */
2680 ldv_handler_precall();
2681 mv_u3d_remove( var_group5);
2682 /* LDV_COMMENT_BEGIN_PREP */
2683 #ifdef CONFIG_PM_SLEEP
2684 #endif
2685 /* LDV_COMMENT_END_PREP */
2686 ldv_s_mv_u3d_driver_platform_driver++;
2687
2688 }
2689
2690 }
2691
2692 break;
2693 case 16: {
2694
2695 /** STRUCT: struct type: platform_driver, struct name: mv_u3d_driver **/
2696 if(ldv_s_mv_u3d_driver_platform_driver==2) {
2697
2698 /* content: static void mv_u3d_shutdown(struct platform_device *dev)*/
2699 /* LDV_COMMENT_BEGIN_PREP */
2700 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2701 #ifdef CONFIG_PM_SLEEP
2702 #endif
2703 /* LDV_COMMENT_END_PREP */
2704 /* LDV_COMMENT_FUNCTION_CALL Function from field "shutdown" from driver structure with callbacks "mv_u3d_driver" */
2705 ldv_handler_precall();
2706 mv_u3d_shutdown( var_group5);
2707 ldv_s_mv_u3d_driver_platform_driver=0;
2708
2709 }
2710
2711 }
2712
2713 break;
2714 case 17: {
2715
2716 /** CALLBACK SECTION request_irq **/
2717 LDV_IN_INTERRUPT=2;
2718
2719 /* content: static irqreturn_t mv_u3d_irq(int irq, void *dev)*/
2720 /* LDV_COMMENT_BEGIN_PREP */
2721 #define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
2722 /* LDV_COMMENT_END_PREP */
2723 /* LDV_COMMENT_FUNCTION_CALL */
2724 ldv_handler_precall();
2725 mv_u3d_irq( var_mv_u3d_irq_39_p0, var_mv_u3d_irq_39_p1);
2726 /* LDV_COMMENT_BEGIN_PREP */
2727 #ifdef CONFIG_PM_SLEEP
2728 #endif
2729 /* LDV_COMMENT_END_PREP */
2730 LDV_IN_INTERRUPT=1;
2731
2732
2733
2734 }
2735
2736 break;
2737 default: break;
2738
2739 }
2740
2741 }
2742
2743 ldv_module_exit:
2744
2745 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
2746 ldv_final: ldv_check_final_state();
2747
2748 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
2749 return;
2750
2751 }
2752 #endif
2753
2754 /* LDV_COMMENT_END_MAIN */
2755
2756 #line 10 "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/10667/dscv_tempdir/dscv/ri/331_1a/drivers/usb/gadget/udc/mv_u3d_core.o.c.prepared" 1
2 #include <verifier/rcv.h>
3 #include <kernel-model/ERR.inc>
4
5 int LDV_DMA_MAP_CALLS = 0;
6
7 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_page') maps page */
8 void ldv_dma_map_page(void) {
9 	/* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
10 ldv_assert(LDV_DMA_MAP_CALLS == 0);
11 /* LDV_COMMENT_CHANGE_STATE Increase dma_mapping counter */
12 LDV_DMA_MAP_CALLS++;
13 }
14
15 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_mapping_error') checks the result of a dma mapping */
16 void ldv_dma_mapping_error(void) {
17 	/* LDV_COMMENT_ASSERT There must be an unchecked dma_mapping call to verify */
18 ldv_assert(LDV_DMA_MAP_CALLS != 0);
19 	/* LDV_COMMENT_CHANGE_STATE Mark the previous dma_mapping call as checked */
20 LDV_DMA_MAP_CALLS--;
21 }
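/*
 * A minimal sketch of the calling pattern this model checks: every mapping
 * call (modelled by ldv_dma_map_page()) must be followed by a
 * dma_mapping_error() check (modelled by ldv_dma_mapping_error()) before the
 * next mapping, so that LDV_DMA_MAP_CALLS returns to zero. The helper name is
 * hypothetical; <linux/dma-mapping.h> and <linux/errno.h> are assumed to be
 * available, as in the driver above.
 */
static int ldv_example_checked_map(struct device *dev, struct page *page)
{
	dma_addr_t handle;

	/* LDV_DMA_MAP_CALLS: 0 -> 1 */
	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	/* LDV_DMA_MAP_CALLS: 1 -> 0; omitting this check trips ldv_assert() above */
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}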
22
23 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that the dma_mapping counter has its initial value at the end */
24 void ldv_check_final_state(void) {
25 	/* LDV_COMMENT_ASSERT Every dma_mapping call should have been checked before the module is unloaded */
26 ldv_assert(LDV_DMA_MAP_CALLS == 0);
27 }
1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because some static verifiers (like
9    BLAST) do not accept multiple error labels in a program. */
10 static inline void ldv_error(void)
11 {
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop, that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21 LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return nondeterministic negative integer number. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 int ret = ldv_undef_int();
33
34 ldv_assume(ret < 0);
35
36 return ret;
37 }
38 /* Return nondeterministic nonpositive integer number. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 int ret = ldv_undef_int();
42
43 ldv_assume(ret <= 0);
44
45 return ret;
46 }
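/*
 * A minimal sketch, in the same style as the helpers above, of how
 * ldv_assume() constrains a nondeterministic value: paths where the value
 * falls outside the requested range are simply cut off by the verifier. The
 * function name and range parameters are hypothetical.
 */
static inline int ldv_undef_int_range(int lo, int hi)
{
	int ret = ldv_undef_int();

	ldv_assume(ret >= lo);
	ldv_assume(ret <= hi);

	return ret;
}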
47
48 /* Add an explicit model for the __builtin_expect GCC function. Without the
49    model its return value would be treated as nondeterministic by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62 ldv_assert(0);
63 }
64
65 /* The constant is used to simulate an error return from ldv_undef_ptr(). */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */
1 #ifndef __LINUX_COMPILER_H
2 #define __LINUX_COMPILER_H
3
4 #ifndef __ASSEMBLY__
5
6 #ifdef __CHECKER__
7 # define __user __attribute__((noderef, address_space(1)))
8 # define __kernel __attribute__((address_space(0)))
9 # define __safe __attribute__((safe))
10 # define __force __attribute__((force))
11 # define __nocast __attribute__((nocast))
12 # define __iomem __attribute__((noderef, address_space(2)))
13 # define __must_hold(x) __attribute__((context(x,1,1)))
14 # define __acquires(x) __attribute__((context(x,0,1)))
15 # define __releases(x) __attribute__((context(x,1,0)))
16 # define __acquire(x) __context__(x,1)
17 # define __release(x) __context__(x,-1)
18 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
19 # define __percpu __attribute__((noderef, address_space(3)))
20 #ifdef CONFIG_SPARSE_RCU_POINTER
21 # define __rcu __attribute__((noderef, address_space(4)))
22 #else /* CONFIG_SPARSE_RCU_POINTER */
23 # define __rcu
24 #endif /* CONFIG_SPARSE_RCU_POINTER */
25 # define __private __attribute__((noderef))
26 extern void __chk_user_ptr(const volatile void __user *);
27 extern void __chk_io_ptr(const volatile void __iomem *);
28 # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
29 #else /* __CHECKER__ */
30 # define __user
31 # define __kernel
32 # define __safe
33 # define __force
34 # define __nocast
35 # define __iomem
36 # define __chk_user_ptr(x) (void)0
37 # define __chk_io_ptr(x) (void)0
38 # define __builtin_warning(x, y...) (1)
39 # define __must_hold(x)
40 # define __acquires(x)
41 # define __releases(x)
42 # define __acquire(x) (void)0
43 # define __release(x) (void)0
44 # define __cond_lock(x,c) (c)
45 # define __percpu
46 # define __rcu
47 # define __private
48 # define ACCESS_PRIVATE(p, member) ((p)->member)
49 #endif /* __CHECKER__ */
50
51 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
52 #define ___PASTE(a,b) a##b
53 #define __PASTE(a,b) ___PASTE(a,b)
54
55 #ifdef __KERNEL__
56
57 #ifdef __GNUC__
58 #include <linux/compiler-gcc.h>
59 #endif
60
61 #if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
62 #define notrace __attribute__((hotpatch(0,0)))
63 #else
64 #define notrace __attribute__((no_instrument_function))
65 #endif
66
67 /* Intel compiler defines __GNUC__. So we will overwrite implementations
68 * coming from above header files here
69 */
70 #ifdef __INTEL_COMPILER
71 # include <linux/compiler-intel.h>
72 #endif
73
74 /* Clang compiler defines __GNUC__. So we will overwrite implementations
75 * coming from above header files here
76 */
77 #ifdef __clang__
78 #include <linux/compiler-clang.h>
79 #endif
80
81 /*
82 * Generic compiler-dependent macros required for kernel
83 * build go below this comment. Actual compiler/compiler version
84 * specific implementations come from the above header files
85 */
86
87 struct ftrace_branch_data {
88 const char *func;
89 const char *file;
90 unsigned line;
91 union {
92 struct {
93 unsigned long correct;
94 unsigned long incorrect;
95 };
96 struct {
97 unsigned long miss;
98 unsigned long hit;
99 };
100 unsigned long miss_hit[2];
101 };
102 };
103
104 /*
105 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
106 * to disable branch tracing on a per file basis.
107 */
108 #if defined(CONFIG_TRACE_BRANCH_PROFILING) \
109 && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
110 void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
111
112 #define likely_notrace(x) __builtin_expect(!!(x), 1)
113 #define unlikely_notrace(x) __builtin_expect(!!(x), 0)
114
115 #define __branch_check__(x, expect) ({ \
116 int ______r; \
117 static struct ftrace_branch_data \
118 __attribute__((__aligned__(4))) \
119 __attribute__((section("_ftrace_annotated_branch"))) \
120 ______f = { \
121 .func = __func__, \
122 .file = __FILE__, \
123 .line = __LINE__, \
124 }; \
125 ______r = likely_notrace(x); \
126 ftrace_likely_update(&______f, ______r, expect); \
127 ______r; \
128 })
129
130 /*
131 * Using __builtin_constant_p(x) to ignore cases where the return
132 * value is always the same. This idea is taken from a similar patch
133 * written by Daniel Walker.
134 */
135 # ifndef likely
136 # define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
137 # endif
138 # ifndef unlikely
139 # define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
140 # endif
141
142 #ifdef CONFIG_PROFILE_ALL_BRANCHES
143 /*
144 * "Define 'is'", Bill Clinton
145 * "Define 'if'", Steven Rostedt
146 */
147 #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
148 #define __trace_if(cond) \
149 if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
150 ({ \
151 int ______r; \
152 static struct ftrace_branch_data \
153 __attribute__((__aligned__(4))) \
154 __attribute__((section("_ftrace_branch"))) \
155 ______f = { \
156 .func = __func__, \
157 .file = __FILE__, \
158 .line = __LINE__, \
159 }; \
160 ______r = !!(cond); \
161 ______f.miss_hit[______r]++; \
162 ______r; \
163 }))
164 #endif /* CONFIG_PROFILE_ALL_BRANCHES */
165
166 #else
167 # define likely(x) __builtin_expect(!!(x), 1)
168 # define unlikely(x) __builtin_expect(!!(x), 0)
169 #endif
170
171 /* Optimization barrier */
172 #ifndef barrier
173 # define barrier() __memory_barrier()
174 #endif
175
176 #ifndef barrier_data
177 # define barrier_data(ptr) barrier()
178 #endif
179
180 /* Unreachable code */
181 #ifndef unreachable
182 # define unreachable() do { } while (1)
183 #endif
184
185 #ifndef RELOC_HIDE
186 # define RELOC_HIDE(ptr, off) \
187 ({ unsigned long __ptr; \
188 __ptr = (unsigned long) (ptr); \
189 (typeof(ptr)) (__ptr + (off)); })
190 #endif
191
192 #ifndef OPTIMIZER_HIDE_VAR
193 #define OPTIMIZER_HIDE_VAR(var) barrier()
194 #endif
195
196 /* Not-quite-unique ID. */
197 #ifndef __UNIQUE_ID
198 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
199 #endif
200
201 #include <uapi/linux/types.h>
202
203 #define __READ_ONCE_SIZE \
204 ({ \
205 switch (size) { \
206 case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
207 case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
208 case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
209 case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
210 default: \
211 barrier(); \
212 __builtin_memcpy((void *)res, (const void *)p, size); \
213 barrier(); \
214 } \
215 })
216
217 static __always_inline
218 void __read_once_size(const volatile void *p, void *res, int size)
219 {
220 __READ_ONCE_SIZE;
221 }
222
223 #ifdef CONFIG_KASAN
224 /*
225  * This function is not 'inline' because __no_sanitize_address conflicts
226 * with inlining. Attempt to inline it may cause a build failure.
227 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
228 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
229 */
230 static __no_sanitize_address __maybe_unused
231 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
232 {
233 __READ_ONCE_SIZE;
234 }
235 #else
236 static __always_inline
237 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
238 {
239 __READ_ONCE_SIZE;
240 }
241 #endif
242
243 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
244 {
245 switch (size) {
246 case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
247 case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
248 case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
249 case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
250 default:
251 barrier();
252 __builtin_memcpy((void *)p, (const void *)res, size);
253 barrier();
254 }
255 }
256
257 /*
258 * Prevent the compiler from merging or refetching reads or writes. The
259 * compiler is also forbidden from reordering successive instances of
260 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
261 * compiler is aware of some particular ordering. One way to make the
262 * compiler aware of ordering is to put the two invocations of READ_ONCE,
263 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
264 *
265 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
266 * data types like structs or unions. If the size of the accessed data
267 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
268 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
269 * least two memcpy()s: one for the __builtin_memcpy() and then one for
270 * the macro doing the copy of variable - '__u' allocated on the stack.
271 *
272 * Their two major use cases are: (1) Mediating communication between
273 * process-level code and irq/NMI handlers, all running on the same CPU,
274 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
275 * mutilate accesses that either do not require ordering or that interact
276 * with an explicit memory barrier or atomic instruction that provides the
277 * required ordering.
278 */
279
280 #define __READ_ONCE(x, check) \
281 ({ \
282 union { typeof(x) __val; char __c[1]; } __u; \
283 if (check) \
284 __read_once_size(&(x), __u.__c, sizeof(x)); \
285 else \
286 __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
287 __u.__val; \
288 })
289 #define READ_ONCE(x) __READ_ONCE(x, 1)
290
291 /*
292 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
293 * to hide memory access from KASAN.
294 */
295 #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
296
297 #define WRITE_ONCE(x, val) \
298 ({ \
299 union { typeof(x) __val; char __c[1]; } __u = \
300 { .__val = (__force typeof(x)) (val) }; \
301 __write_once_size(&(x), __u.__c, sizeof(x)); \
302 __u.__val; \
303 })
304
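/*
 * A minimal sketch of the first use case described in the comment above: a
 * flag shared between process-level code and an interrupt handler, accessed
 * only through READ_ONCE()/WRITE_ONCE() so the compiler cannot merge, refetch
 * or cache the accesses. The example_* names are hypothetical.
 */
static int example_flag;

/* Producer side, e.g. called from an interrupt handler. */
static inline void example_set_flag(void)
{
	WRITE_ONCE(example_flag, 1);
}

/* Consumer side: each call performs a fresh load of example_flag. */
static inline int example_test_flag(void)
{
	return READ_ONCE(example_flag);
}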
305 #endif /* __KERNEL__ */
306
307 #endif /* __ASSEMBLY__ */
308
309 #ifdef __KERNEL__
310 /*
311 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
312 * warning for each use, in hopes of speeding the functions removal.
313 * Usage is:
314 * int __deprecated foo(void)
315 */
316 #ifndef __deprecated
317 # define __deprecated /* unimplemented */
318 #endif
319
320 #ifdef MODULE
321 #define __deprecated_for_modules __deprecated
322 #else
323 #define __deprecated_for_modules
324 #endif
325
326 #ifndef __must_check
327 #define __must_check
328 #endif
329
330 #ifndef CONFIG_ENABLE_MUST_CHECK
331 #undef __must_check
332 #define __must_check
333 #endif
334 #ifndef CONFIG_ENABLE_WARN_DEPRECATED
335 #undef __deprecated
336 #undef __deprecated_for_modules
337 #define __deprecated
338 #define __deprecated_for_modules
339 #endif
340
341 #ifndef __malloc
342 #define __malloc
343 #endif
344
345 /*
346 * Allow us to avoid 'defined but not used' warnings on functions and data,
347 * as well as force them to be emitted to the assembly file.
348 *
349 * As of gcc 3.4, static functions that are not marked with attribute((used))
350 * may be elided from the assembly file. As of gcc 3.4, static data not so
351 * marked will not be elided, but this may change in a future gcc version.
352 *
353 * NOTE: Because distributions shipped with a backported unit-at-a-time
354 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
355 * for gcc >=3.3 instead of 3.4.
356 *
357 * In prior versions of gcc, such functions and data would be emitted, but
358 * would be warned about except with attribute((unused)).
359 *
360 * Mark functions that are referenced only in inline assembly as __used so
361 * the code is emitted even though it appears to be unreferenced.
362 */
363 #ifndef __used
364 # define __used /* unimplemented */
365 #endif
366
367 #ifndef __maybe_unused
368 # define __maybe_unused /* unimplemented */
369 #endif
370
371 #ifndef __always_unused
372 # define __always_unused /* unimplemented */
373 #endif
374
375 #ifndef noinline
376 #define noinline
377 #endif
378
379 /*
380  * Rather than using noinline to prevent stack consumption, use
381 * noinline_for_stack instead. For documentation reasons.
382 */
383 #define noinline_for_stack noinline
384
385 #ifndef __always_inline
386 #define __always_inline inline
387 #endif
388
389 #endif /* __KERNEL__ */
390
391 /*
392 * From the GCC manual:
393 *
394 * Many functions do not examine any values except their arguments,
395 * and have no effects except the return value. Basically this is
396 * just slightly more strict class than the `pure' attribute above,
397 * since function is not allowed to read global memory.
398 *
399 * Note that a function that has pointer arguments and examines the
400 * data pointed to must _not_ be declared `const'. Likewise, a
401 * function that calls a non-`const' function usually must not be
402 * `const'. It does not make sense for a `const' function to return
403 * `void'.
404 */
405 #ifndef __attribute_const__
406 # define __attribute_const__ /* unimplemented */
407 #endif
408
409 /*
410 * Tell gcc if a function is cold. The compiler will assume any path
411 * directly leading to the call is unlikely.
412 */
413
414 #ifndef __cold
415 #define __cold
416 #endif
417
418 /* Simple shorthand for a section definition */
419 #ifndef __section
420 # define __section(S) __attribute__ ((__section__(#S)))
421 #endif
422
423 #ifndef __visible
424 #define __visible
425 #endif
426
427 /*
428 * Assume alignment of return value.
429 */
430 #ifndef __assume_aligned
431 #define __assume_aligned(a, ...)
432 #endif
433
434
435 /* Are two types/vars the same type (ignoring qualifiers)? */
436 #ifndef __same_type
437 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
438 #endif
439
440 /* Is this type a native word size -- useful for atomic operations */
441 #ifndef __native_word
442 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
443 #endif
444
445 /* Compile time object size, -1 for unknown */
446 #ifndef __compiletime_object_size
447 # define __compiletime_object_size(obj) -1
448 #endif
449 #ifndef __compiletime_warning
450 # define __compiletime_warning(message)
451 #endif
452 #ifndef __compiletime_error
453 # define __compiletime_error(message)
454 /*
455 * Sparse complains of variable sized arrays due to the temporary variable in
456 * __compiletime_assert. Unfortunately we can't just expand it out to make
457 * sparse see a constant array size without breaking compiletime_assert on old
458 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
459 */
460 # ifndef __CHECKER__
461 # define __compiletime_error_fallback(condition) \
462 do { } while (0)
463 # endif
464 #endif
465 #ifndef __compiletime_error_fallback
466 # define __compiletime_error_fallback(condition) do { } while (0)
467 #endif
468
469 #define __compiletime_assert(condition, msg, prefix, suffix) \
470 do { \
471 bool __cond = !(condition); \
472 extern void prefix ## suffix(void) __compiletime_error(msg); \
473 if (__cond) \
474 prefix ## suffix(); \
475 __compiletime_error_fallback(__cond); \
476 } while (0)
477
478 #define _compiletime_assert(condition, msg, prefix, suffix) \
479 __compiletime_assert(condition, msg, prefix, suffix)
480
481 /**
482 * compiletime_assert - break build and emit msg if condition is false
483 * @condition: a compile-time constant condition to check
484 * @msg: a message to emit if condition is false
485 *
486 * In tradition of POSIX assert, this macro will break the build if the
487 * supplied condition is *false*, emitting the supplied error message if the
488 * compiler has support to do so.
489 */
490 #define compiletime_assert(condition, msg) \
491 _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
492
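/*
 * A minimal sketch of compiletime_assert() in use: the condition is a
 * compile-time constant, so a false condition breaks the build with the given
 * message. The function name is hypothetical.
 */
static inline void example_check_widths(void)
{
	compiletime_assert(sizeof(long) >= sizeof(int),
			   "long must be at least as wide as int");
}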
493 #define compiletime_assert_atomic_type(t) \
494 compiletime_assert(__native_word(t), \
495 "Need native word sized stores/loads for atomicity.")
496
497 /*
498 * Prevent the compiler from merging or refetching accesses. The compiler
499 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
500 * but only when the compiler is aware of some particular ordering. One way
501 * to make the compiler aware of ordering is to put the two invocations of
502 * ACCESS_ONCE() in different C statements.
503 *
504 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
505 * on a union member will work as long as the size of the member matches the
506 * size of the union and the size is smaller than word size.
507 *
508 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
509 * between process-level code and irq/NMI handlers, all running on the same CPU,
510 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
511 * mutilate accesses that either do not require ordering or that interact
512 * with an explicit memory barrier or atomic instruction that provides the
513 * required ordering.
514 *
515 * If possible use READ_ONCE()/WRITE_ONCE() instead.
516 */
517 #define __ACCESS_ONCE(x) ({ \
518 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
519 (volatile typeof(x) *)&(x); })
520 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
521
522 /**
523 * lockless_dereference() - safely load a pointer for later dereference
524 * @p: The pointer to load
525 *
526 * Similar to rcu_dereference(), but for situations where the pointed-to
527 * object's lifetime is managed by something other than RCU. That
528 * "something other" might be reference counting or simple immortality.
529 *
530 * The seemingly unused void * variable is to validate @p is indeed a pointer
531 * type. All pointer types silently cast to void *.
532 */
533 #define lockless_dereference(p) \
534 ({ \
535 typeof(p) _________p1 = READ_ONCE(p); \
536 __maybe_unused const void * const _________p2 = _________p1; \
537 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
538 (_________p1); \
539 })
540
541 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
542 #ifdef CONFIG_KPROBES
543 # define __kprobes __attribute__((__section__(".kprobes.text")))
544 # define nokprobe_inline __always_inline
545 #else
546 # define __kprobes
547 # define nokprobe_inline inline
548 #endif
549 #endif /* __LINUX_COMPILER_H */
1 #ifndef _LINUX_DMA_MAPPING_H
2 #define _LINUX_DMA_MAPPING_H
3
4 #include <linux/sizes.h>
5 #include <linux/string.h>
6 #include <linux/device.h>
7 #include <linux/err.h>
8 #include <linux/dma-debug.h>
9 #include <linux/dma-direction.h>
10 #include <linux/scatterlist.h>
11 #include <linux/kmemcheck.h>
12 #include <linux/bug.h>
13
14 /**
15 * List of possible attributes associated with a DMA mapping. The semantics
16 * of each attribute should be defined in Documentation/DMA-attributes.txt.
17 *
18 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
19 * forces all pending DMA writes to complete.
20 */
21 #define DMA_ATTR_WRITE_BARRIER (1UL << 0)
22 /*
23 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
24 * may be weakly ordered, that is that reads and writes may pass each other.
25 */
26 #define DMA_ATTR_WEAK_ORDERING (1UL << 1)
27 /*
28 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
29 * buffered to improve performance.
30 */
31 #define DMA_ATTR_WRITE_COMBINE (1UL << 2)
32 /*
33  * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
34  * consistent or non-consistent memory as it sees fit.
35 */
36 #define DMA_ATTR_NON_CONSISTENT (1UL << 3)
37 /*
38  * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
39  * virtual mapping for the allocated buffer.
40 */
41 #define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
42 /*
43 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
44 * the CPU cache for the given buffer assuming that it has been already
45 * transferred to 'device' domain.
46 */
47 #define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
48 /*
49 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
50 * in physical memory.
51 */
52 #define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
53 /*
54 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
55  * that it's probably not worth the time to try to allocate memory in a way
56 * that gives better TLB efficiency.
57 */
58 #define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
59
60 /*
61 * A dma_addr_t can hold any valid DMA or bus address for the platform.
62 * It can be given to a device to use as a DMA source or target. A CPU cannot
63 * reference a dma_addr_t directly because there may be translation between
64 * its physical address space and the bus address space.
65 */
66 struct dma_map_ops {
67 void* (*alloc)(struct device *dev, size_t size,
68 dma_addr_t *dma_handle, gfp_t gfp,
69 unsigned long attrs);
70 void (*free)(struct device *dev, size_t size,
71 void *vaddr, dma_addr_t dma_handle,
72 unsigned long attrs);
73 int (*mmap)(struct device *, struct vm_area_struct *,
74 void *, dma_addr_t, size_t,
75 unsigned long attrs);
76
77 int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
78 dma_addr_t, size_t, unsigned long attrs);
79
80 dma_addr_t (*map_page)(struct device *dev, struct page *page,
81 unsigned long offset, size_t size,
82 enum dma_data_direction dir,
83 unsigned long attrs);
84 void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
85 size_t size, enum dma_data_direction dir,
86 unsigned long attrs);
87 /*
88 * map_sg returns 0 on error and a value > 0 on success.
89 * It should never return a value < 0.
90 */
91 int (*map_sg)(struct device *dev, struct scatterlist *sg,
92 int nents, enum dma_data_direction dir,
93 unsigned long attrs);
94 void (*unmap_sg)(struct device *dev,
95 struct scatterlist *sg, int nents,
96 enum dma_data_direction dir,
97 unsigned long attrs);
98 void (*sync_single_for_cpu)(struct device *dev,
99 dma_addr_t dma_handle, size_t size,
100 enum dma_data_direction dir);
101 void (*sync_single_for_device)(struct device *dev,
102 dma_addr_t dma_handle, size_t size,
103 enum dma_data_direction dir);
104 void (*sync_sg_for_cpu)(struct device *dev,
105 struct scatterlist *sg, int nents,
106 enum dma_data_direction dir);
107 void (*sync_sg_for_device)(struct device *dev,
108 struct scatterlist *sg, int nents,
109 enum dma_data_direction dir);
110 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
111 int (*dma_supported)(struct device *dev, u64 mask);
112 int (*set_dma_mask)(struct device *dev, u64 mask);
113 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
114 u64 (*get_required_mask)(struct device *dev);
115 #endif
116 int is_phys;
117 };
118
119 extern struct dma_map_ops dma_noop_ops;
120
121 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
122
123 #define DMA_MASK_NONE 0x0ULL
124
125 static inline int valid_dma_direction(int dma_direction)
126 {
127 return ((dma_direction == DMA_BIDIRECTIONAL) ||
128 (dma_direction == DMA_TO_DEVICE) ||
129 (dma_direction == DMA_FROM_DEVICE));
130 }
131
132 static inline int is_device_dma_capable(struct device *dev)
133 {
134 return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
135 }
136
137 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
138 /*
139 * These three functions are only for dma allocator.
140 * Don't use them in device drivers.
141 */
142 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
143 dma_addr_t *dma_handle, void **ret);
144 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
145
146 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
147 void *cpu_addr, size_t size, int *ret);
148 #else
149 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
150 #define dma_release_from_coherent(dev, order, vaddr) (0)
151 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
152 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
153
154 #ifdef CONFIG_HAS_DMA
155 #include <asm/dma-mapping.h>
156 #else
157 /*
158 * Define the dma api to allow compilation but not linking of
159 * dma dependent code. Code that depends on the dma-mapping
160 * API needs to set 'depends on HAS_DMA' in its Kconfig
161 */
162 extern struct dma_map_ops bad_dma_ops;
163 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
164 {
165 return &bad_dma_ops;
166 }
167 #endif
168
169 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
170 size_t size,
171 enum dma_data_direction dir,
172 unsigned long attrs)
173 {
174 struct dma_map_ops *ops = get_dma_ops(dev);
175 dma_addr_t addr;
176
177 kmemcheck_mark_initialized(ptr, size);
178 BUG_ON(!valid_dma_direction(dir));
179 addr = ops->map_page(dev, virt_to_page(ptr),
180 offset_in_page(ptr), size,
181 dir, attrs);
182 debug_dma_map_page(dev, virt_to_page(ptr),
183 offset_in_page(ptr), size,
184 dir, addr, true);
185 return addr;
186 }
187
188 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
189 size_t size,
190 enum dma_data_direction dir,
191 unsigned long attrs)
192 {
193 struct dma_map_ops *ops = get_dma_ops(dev);
194
195 BUG_ON(!valid_dma_direction(dir));
196 if (ops->unmap_page)
197 ops->unmap_page(dev, addr, size, dir, attrs);
198 debug_dma_unmap_page(dev, addr, size, dir, true);
199 }
200
201 /*
202 * dma_maps_sg_attrs returns 0 on error and > 0 on success.
203 * It should never return a value < 0.
204 */
205 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
206 int nents, enum dma_data_direction dir,
207 unsigned long attrs)
208 {
209 struct dma_map_ops *ops = get_dma_ops(dev);
210 int i, ents;
211 struct scatterlist *s;
212
213 for_each_sg(sg, s, nents, i)
214 kmemcheck_mark_initialized(sg_virt(s), s->length);
215 BUG_ON(!valid_dma_direction(dir));
216 ents = ops->map_sg(dev, sg, nents, dir, attrs);
217 BUG_ON(ents < 0);
218 debug_dma_map_sg(dev, sg, nents, ents, dir);
219
220 return ents;
221 }
222
223 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
224 int nents, enum dma_data_direction dir,
225 unsigned long attrs)
226 {
227 struct dma_map_ops *ops = get_dma_ops(dev);
228
229 BUG_ON(!valid_dma_direction(dir));
230 debug_dma_unmap_sg(dev, sg, nents, dir);
231 if (ops->unmap_sg)
232 ops->unmap_sg(dev, sg, nents, dir, attrs);
233 }
234
235 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
236 size_t offset, size_t size,
237 enum dma_data_direction dir)
238 {
239 struct dma_map_ops *ops = get_dma_ops(dev);
240 dma_addr_t addr;
241
242 kmemcheck_mark_initialized(page_address(page) + offset, size);
243 BUG_ON(!valid_dma_direction(dir));
244 addr = ops->map_page(dev, page, offset, size, dir, 0);
245 debug_dma_map_page(dev, page, offset, size, dir, addr, false);
246
247 return addr;
248 }
249
250 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
251 size_t size, enum dma_data_direction dir)
252 {
253 struct dma_map_ops *ops = get_dma_ops(dev);
254
255 BUG_ON(!valid_dma_direction(dir));
256 if (ops->unmap_page)
257 ops->unmap_page(dev, addr, size, dir, 0);
258 debug_dma_unmap_page(dev, addr, size, dir, false);
259 }
260
261 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
262 size_t size,
263 enum dma_data_direction dir)
264 {
265 struct dma_map_ops *ops = get_dma_ops(dev);
266
267 BUG_ON(!valid_dma_direction(dir));
268 if (ops->sync_single_for_cpu)
269 ops->sync_single_for_cpu(dev, addr, size, dir);
270 debug_dma_sync_single_for_cpu(dev, addr, size, dir);
271 }
272
273 static inline void dma_sync_single_for_device(struct device *dev,
274 dma_addr_t addr, size_t size,
275 enum dma_data_direction dir)
276 {
277 struct dma_map_ops *ops = get_dma_ops(dev);
278
279 BUG_ON(!valid_dma_direction(dir));
280 if (ops->sync_single_for_device)
281 ops->sync_single_for_device(dev, addr, size, dir);
282 debug_dma_sync_single_for_device(dev, addr, size, dir);
283 }
284
285 static inline void dma_sync_single_range_for_cpu(struct device *dev,
286 dma_addr_t addr,
287 unsigned long offset,
288 size_t size,
289 enum dma_data_direction dir)
290 {
291 const struct dma_map_ops *ops = get_dma_ops(dev);
292
293 BUG_ON(!valid_dma_direction(dir));
294 if (ops->sync_single_for_cpu)
295 ops->sync_single_for_cpu(dev, addr + offset, size, dir);
296 debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
297 }
298
299 static inline void dma_sync_single_range_for_device(struct device *dev,
300 dma_addr_t addr,
301 unsigned long offset,
302 size_t size,
303 enum dma_data_direction dir)
304 {
305 const struct dma_map_ops *ops = get_dma_ops(dev);
306
307 BUG_ON(!valid_dma_direction(dir));
308 if (ops->sync_single_for_device)
309 ops->sync_single_for_device(dev, addr + offset, size, dir);
310 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
311 }
312
313 static inline void
314 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
315 int nelems, enum dma_data_direction dir)
316 {
317 struct dma_map_ops *ops = get_dma_ops(dev);
318
319 BUG_ON(!valid_dma_direction(dir));
320 if (ops->sync_sg_for_cpu)
321 ops->sync_sg_for_cpu(dev, sg, nelems, dir);
322 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
323 }
324
325 static inline void
326 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
327 int nelems, enum dma_data_direction dir)
328 {
329 struct dma_map_ops *ops = get_dma_ops(dev);
330
331 BUG_ON(!valid_dma_direction(dir));
332 if (ops->sync_sg_for_device)
333 ops->sync_sg_for_device(dev, sg, nelems, dir);
334 debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
335
336 }
337
338 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
339 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
340 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
341 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
342
343 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
344 void *cpu_addr, dma_addr_t dma_addr, size_t size);
345
346 void *dma_common_contiguous_remap(struct page *page, size_t size,
347 unsigned long vm_flags,
348 pgprot_t prot, const void *caller);
349
350 void *dma_common_pages_remap(struct page **pages, size_t size,
351 unsigned long vm_flags, pgprot_t prot,
352 const void *caller);
353 void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
354
355 /**
356 * dma_mmap_attrs - map a coherent DMA allocation into user space
357 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
358 * @vma: vm_area_struct describing requested user mapping
359 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
360 * @handle: device-view address returned from dma_alloc_attrs
361 * @size: size of memory originally requested in dma_alloc_attrs
362 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
363 *
364 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
365 * into user space. The coherent DMA buffer must not be freed by the
366 * driver until the user space mapping has been released.
367 */
368 static inline int
369 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
370 dma_addr_t dma_addr, size_t size, unsigned long attrs)
371 {
372 struct dma_map_ops *ops = get_dma_ops(dev);
373 BUG_ON(!ops);
374 if (ops->mmap)
375 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
376 return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
377 }
378
379 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
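/*
 * A minimal sketch of dma_mmap_coherent() in use, following the kernel-doc
 * above: a buffer previously obtained from dma_alloc_coherent() is mapped
 * into user space and must not be freed until that mapping is released. The
 * example_dev structure and field names are hypothetical.
 */
struct example_dev {
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_handle;
	size_t buf_size;
};

static inline int example_mmap_buffer(struct example_dev *ed,
				      struct vm_area_struct *vma)
{
	return dma_mmap_coherent(ed->dev, vma, ed->cpu_addr,
				 ed->dma_handle, ed->buf_size);
}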
380
381 int
382 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
383 void *cpu_addr, dma_addr_t dma_addr, size_t size);
384
385 static inline int
386 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
387 dma_addr_t dma_addr, size_t size,
388 unsigned long attrs)
389 {
390 struct dma_map_ops *ops = get_dma_ops(dev);
391 BUG_ON(!ops);
392 if (ops->get_sgtable)
393 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
394 attrs);
395 return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
396 }
397
398 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
399
400 #ifndef arch_dma_alloc_attrs
401 #define arch_dma_alloc_attrs(dev, flag) (true)
402 #endif
403
404 static inline void *dma_alloc_attrs(struct device *dev, size_t size,
405 dma_addr_t *dma_handle, gfp_t flag,
406 unsigned long attrs)
407 {
408 struct dma_map_ops *ops = get_dma_ops(dev);
409 void *cpu_addr;
410
411 BUG_ON(!ops);
412
413 if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
414 return cpu_addr;
415
416 if (!arch_dma_alloc_attrs(&dev, &flag))
417 return NULL;
418 if (!ops->alloc)
419 return NULL;
420
421 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
422 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
423 return cpu_addr;
424 }
425
426 static inline void dma_free_attrs(struct device *dev, size_t size,
427 void *cpu_addr, dma_addr_t dma_handle,
428 unsigned long attrs)
429 {
430 struct dma_map_ops *ops = get_dma_ops(dev);
431
432 BUG_ON(!ops);
433 WARN_ON(irqs_disabled());
434
435 if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
436 return;
437
438 if (!ops->free || !cpu_addr)
439 return;
440
441 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
442 ops->free(dev, size, cpu_addr, dma_handle, attrs);
443 }
444
445 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
446 dma_addr_t *dma_handle, gfp_t flag)
447 {
448 return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
449 }
450
451 static inline void dma_free_coherent(struct device *dev, size_t size,
452 void *cpu_addr, dma_addr_t dma_handle)
453 {
454 return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
455 }
456
457 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
458 dma_addr_t *dma_handle, gfp_t gfp)
459 {
460 return dma_alloc_attrs(dev, size, dma_handle, gfp,
461 DMA_ATTR_NON_CONSISTENT);
462 }
463
464 static inline void dma_free_noncoherent(struct device *dev, size_t size,
465 void *cpu_addr, dma_addr_t dma_handle)
466 {
467 dma_free_attrs(dev, size, cpu_addr, dma_handle,
468 DMA_ATTR_NON_CONSISTENT);
469 }
470
471 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
472 {
473 debug_dma_mapping_error(dev, dma_addr);
474
475 if (get_dma_ops(dev)->mapping_error)
476 return get_dma_ops(dev)->mapping_error(dev, dma_addr);
477
478 #ifdef DMA_ERROR_CODE
479 return dma_addr == DMA_ERROR_CODE;
480 #else
481 return 0;
482 #endif
483 }
484
485 #ifndef HAVE_ARCH_DMA_SUPPORTED
486 static inline int dma_supported(struct device *dev, u64 mask)
487 {
488 struct dma_map_ops *ops = get_dma_ops(dev);
489
490 if (!ops)
491 return 0;
492 if (!ops->dma_supported)
493 return 1;
494 return ops->dma_supported(dev, mask);
495 }
496 #endif
497
498 #ifndef HAVE_ARCH_DMA_SET_MASK
499 static inline int dma_set_mask(struct device *dev, u64 mask)
500 {
501 struct dma_map_ops *ops = get_dma_ops(dev);
502
503 if (ops->set_dma_mask)
504 return ops->set_dma_mask(dev, mask);
505
506 if (!dev->dma_mask || !dma_supported(dev, mask))
507 return -EIO;
508 *dev->dma_mask = mask;
509 return 0;
510 }
511 #endif
512
513 static inline u64 dma_get_mask(struct device *dev)
514 {
515 if (dev && dev->dma_mask && *dev->dma_mask)
516 return *dev->dma_mask;
517 return DMA_BIT_MASK(32);
518 }
519
520 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
521 int dma_set_coherent_mask(struct device *dev, u64 mask);
522 #else
523 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
524 {
525 if (!dma_supported(dev, mask))
526 return -EIO;
527 dev->coherent_dma_mask = mask;
528 return 0;
529 }
530 #endif
531
532 /*
533 * Set both the DMA mask and the coherent DMA mask to the same thing.
534 * Note that we don't check the return value from dma_set_coherent_mask()
535 * as the DMA API guarantees that the coherent DMA mask can be set to
536 * the same or smaller than the streaming DMA mask.
537 */
538 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
539 {
540 int rc = dma_set_mask(dev, mask);
541 if (rc == 0)
542 dma_set_coherent_mask(dev, mask);
543 return rc;
544 }
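/*
 * A minimal sketch of a typical probe-time use of the helper above: declare
 * 32-bit streaming and coherent DMA addressing in one call. The wrapper name
 * is hypothetical.
 */
static inline int example_setup_dma_masks(struct device *dev)
{
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}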
545
546 /*
547 * Similar to the above, except it deals with the case where the device
548 * does not have dev->dma_mask appropriately setup.
549 */
550 static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
551 {
552 dev->dma_mask = &dev->coherent_dma_mask;
553 return dma_set_mask_and_coherent(dev, mask);
554 }
555
556 extern u64 dma_get_required_mask(struct device *dev);
557
558 #ifndef arch_setup_dma_ops
559 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
560 u64 size, const struct iommu_ops *iommu,
561 bool coherent) { }
562 #endif
563
564 #ifndef arch_teardown_dma_ops
565 static inline void arch_teardown_dma_ops(struct device *dev) { }
566 #endif
567
568 static inline unsigned int dma_get_max_seg_size(struct device *dev)
569 {
570 if (dev->dma_parms && dev->dma_parms->max_segment_size)
571 return dev->dma_parms->max_segment_size;
572 return SZ_64K;
573 }
574
575 static inline unsigned int dma_set_max_seg_size(struct device *dev,
576 unsigned int size)
577 {
578 if (dev->dma_parms) {
579 dev->dma_parms->max_segment_size = size;
580 return 0;
581 }
582 return -EIO;
583 }
584
585 static inline unsigned long dma_get_seg_boundary(struct device *dev)
586 {
587 if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
588 return dev->dma_parms->segment_boundary_mask;
589 return DMA_BIT_MASK(32);
590 }
591
592 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
593 {
594 if (dev->dma_parms) {
595 dev->dma_parms->segment_boundary_mask = mask;
596 return 0;
597 }
598 return -EIO;
599 }
600
601 #ifndef dma_max_pfn
602 static inline unsigned long dma_max_pfn(struct device *dev)
603 {
604 return *dev->dma_mask >> PAGE_SHIFT;
605 }
606 #endif
607
608 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
609 dma_addr_t *dma_handle, gfp_t flag)
610 {
611 void *ret = dma_alloc_coherent(dev, size, dma_handle,
612 flag | __GFP_ZERO);
613 return ret;
614 }
615
616 #ifdef CONFIG_HAS_DMA
617 static inline int dma_get_cache_alignment(void)
618 {
619 #ifdef ARCH_DMA_MINALIGN
620 return ARCH_DMA_MINALIGN;
621 #endif
622 return 1;
623 }
624 #endif
625
626 /* flags for the coherent memory api */
627 #define DMA_MEMORY_MAP 0x01
628 #define DMA_MEMORY_IO 0x02
629 #define DMA_MEMORY_INCLUDES_CHILDREN 0x04
630 #define DMA_MEMORY_EXCLUSIVE 0x08
631
632 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
633 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
634 dma_addr_t device_addr, size_t size, int flags);
635 void dma_release_declared_memory(struct device *dev);
636 void *dma_mark_declared_memory_occupied(struct device *dev,
637 dma_addr_t device_addr, size_t size);
638 #else
639 static inline int
640 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
641 dma_addr_t device_addr, size_t size, int flags)
642 {
643 return 0;
644 }
645
646 static inline void
647 dma_release_declared_memory(struct device *dev)
648 {
649 }
650
651 static inline void *
652 dma_mark_declared_memory_occupied(struct device *dev,
653 dma_addr_t device_addr, size_t size)
654 {
655 return ERR_PTR(-EBUSY);
656 }
657 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
658
659 /*
660 * Managed DMA API
661 */
662 extern void *dmam_alloc_coherent(struct device *dev, size_t size,
663 dma_addr_t *dma_handle, gfp_t gfp);
664 extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
665 dma_addr_t dma_handle);
666 extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
667 dma_addr_t *dma_handle, gfp_t gfp);
668 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
669 dma_addr_t dma_handle);
670 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
671 extern int dmam_declare_coherent_memory(struct device *dev,
672 phys_addr_t phys_addr,
673 dma_addr_t device_addr, size_t size,
674 int flags);
675 extern void dmam_release_declared_memory(struct device *dev);
676 #else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
677 static inline int dmam_declare_coherent_memory(struct device *dev,
678 phys_addr_t phys_addr, dma_addr_t device_addr,
679 size_t size, gfp_t gfp)
680 {
681 return 0;
682 }
683
684 static inline void dmam_release_declared_memory(struct device *dev)
685 {
686 }
687 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
688
689 static inline void *dma_alloc_wc(struct device *dev, size_t size,
690 dma_addr_t *dma_addr, gfp_t gfp)
691 {
692 return dma_alloc_attrs(dev, size, dma_addr, gfp,
693 DMA_ATTR_WRITE_COMBINE);
694 }
695 #ifndef dma_alloc_writecombine
696 #define dma_alloc_writecombine dma_alloc_wc
697 #endif
698
699 static inline void dma_free_wc(struct device *dev, size_t size,
700 void *cpu_addr, dma_addr_t dma_addr)
701 {
702 return dma_free_attrs(dev, size, cpu_addr, dma_addr,
703 DMA_ATTR_WRITE_COMBINE);
704 }
705 #ifndef dma_free_writecombine
706 #define dma_free_writecombine dma_free_wc
707 #endif
708
709 static inline int dma_mmap_wc(struct device *dev,
710 struct vm_area_struct *vma,
711 void *cpu_addr, dma_addr_t dma_addr,
712 size_t size)
713 {
714 return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
715 DMA_ATTR_WRITE_COMBINE);
716 }
717 #ifndef dma_mmap_writecombine
718 #define dma_mmap_writecombine dma_mmap_wc
719 #endif
720
721 #ifdef CONFIG_NEED_DMA_MAP_STATE
722 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
723 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
724 #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
725 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
726 #define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
727 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
728 #else
729 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
730 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
731 #define dma_unmap_addr(PTR, ADDR_NAME) (0)
732 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
733 #define dma_unmap_len(PTR, LEN_NAME) (0)
734 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
735 #endif
736
737 #endif
1 #ifndef LINUX_KMEMCHECK_H
2 #define LINUX_KMEMCHECK_H
3
4 #include <linux/mm_types.h>
5 #include <linux/types.h>
6
7 #ifdef CONFIG_KMEMCHECK
8 extern int kmemcheck_enabled;
9
10 /* The slab-related functions. */
11 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
12 void kmemcheck_free_shadow(struct page *page, int order);
13 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
14 size_t size);
15 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
16
17 void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
18 gfp_t gfpflags);
19
20 void kmemcheck_show_pages(struct page *p, unsigned int n);
21 void kmemcheck_hide_pages(struct page *p, unsigned int n);
22
23 bool kmemcheck_page_is_tracked(struct page *p);
24
25 void kmemcheck_mark_unallocated(void *address, unsigned int n);
26 void kmemcheck_mark_uninitialized(void *address, unsigned int n);
27 void kmemcheck_mark_initialized(void *address, unsigned int n);
28 void kmemcheck_mark_freed(void *address, unsigned int n);
29
30 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
31 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
32 void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
33
34 int kmemcheck_show_addr(unsigned long address);
35 int kmemcheck_hide_addr(unsigned long address);
36
37 bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
38
39 /*
40 * Bitfield annotations
41 *
42 * How to use: If you have a struct using bitfields, for example
43 *
44 * struct a {
45 * int x:8, y:8;
46 * };
47 *
48 * then this should be rewritten as
49 *
50 * struct a {
51 * kmemcheck_bitfield_begin(flags);
52 * int x:8, y:8;
53 * kmemcheck_bitfield_end(flags);
54 * };
55 *
56 * Now the "flags_begin" and "flags_end" members may be used to refer to the
57 * beginning and end, respectively, of the bitfield (and things like
58 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
59 * fields should be annotated:
60 *
61 * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
62 * kmemcheck_annotate_bitfield(a, flags);
63 */
64 #define kmemcheck_bitfield_begin(name) \
65 int name##_begin[0];
66
67 #define kmemcheck_bitfield_end(name) \
68 int name##_end[0];
69
70 #define kmemcheck_annotate_bitfield(ptr, name) \
71 do { \
72 int _n; \
73 \
74 if (!ptr) \
75 break; \
76 \
77 _n = (long) &((ptr)->name##_end) \
78 - (long) &((ptr)->name##_begin); \
79 BUILD_BUG_ON(_n < 0); \
80 \
81 kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
82 } while (0)
83
84 #define kmemcheck_annotate_variable(var) \
85 do { \
86 kmemcheck_mark_initialized(&(var), sizeof(var)); \
87 } while (0) \
88
89 #else
90 #define kmemcheck_enabled 0
91
92 static inline void
93 kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
94 {
95 }
96
97 static inline void
98 kmemcheck_free_shadow(struct page *page, int order)
99 {
100 }
101
102 static inline void
103 kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
104 size_t size)
105 {
106 }
107
108 static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
109 size_t size)
110 {
111 }
112
113 static inline void kmemcheck_pagealloc_alloc(struct page *p,
114 unsigned int order, gfp_t gfpflags)
115 {
116 }
117
118 static inline bool kmemcheck_page_is_tracked(struct page *p)
119 {
120 return false;
121 }
122
123 static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
124 {
125 }
126
127 static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
128 {
129 }
130
131 static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
132 {
133 }
134
135 static inline void kmemcheck_mark_freed(void *address, unsigned int n)
136 {
137 }
138
139 static inline void kmemcheck_mark_unallocated_pages(struct page *p,
140 unsigned int n)
141 {
142 }
143
144 static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
145 unsigned int n)
146 {
147 }
148
149 static inline void kmemcheck_mark_initialized_pages(struct page *p,
150 unsigned int n)
151 {
152 }
153
154 static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
155 {
156 return true;
157 }
158
159 #define kmemcheck_bitfield_begin(name)
160 #define kmemcheck_bitfield_end(name)
161 #define kmemcheck_annotate_bitfield(ptr, name) \
162 do { \
163 } while (0)
164
165 #define kmemcheck_annotate_variable(var) \
166 do { \
167 } while (0)
168
169 #endif /* CONFIG_KMEMCHECK */
170
171 #endif /* LINUX_KMEMCHECK_H */
1 #ifndef _LINUX_LIST_H
2 #define _LINUX_LIST_H
3
4 #include <linux/types.h>
5 #include <linux/stddef.h>
6 #include <linux/poison.h>
7 #include <linux/const.h>
8 #include <linux/kernel.h>
9
10 /*
11 * Simple doubly linked list implementation.
12 *
13 * Some of the internal functions ("__xxx") are useful when
14 * manipulating whole lists rather than single entries, as
15 * sometimes we already know the next/prev entries and we can
16 * generate better code by using them directly rather than
17 * using the generic single-entry routines.
18 */
19
20 #define LIST_HEAD_INIT(name) { &(name), &(name) }
21
22 #define LIST_HEAD(name) \
23 struct list_head name = LIST_HEAD_INIT(name)
24
25 static inline void INIT_LIST_HEAD(struct list_head *list)
26 {
27 WRITE_ONCE(list->next, list);
28 list->prev = list;
29 }
30
31 /*
32 * Insert a new entry between two known consecutive entries.
33 *
34 * This is only for internal list manipulation where we know
35 * the prev/next entries already!
36 */
37 #ifndef CONFIG_DEBUG_LIST
38 static inline void __list_add(struct list_head *new,
39 struct list_head *prev,
40 struct list_head *next)
41 {
42 next->prev = new;
43 new->next = next;
44 new->prev = prev;
45 WRITE_ONCE(prev->next, new);
46 }
47 #else
48 extern void __list_add(struct list_head *new,
49 struct list_head *prev,
50 struct list_head *next);
51 #endif
52
53 /**
54 * list_add - add a new entry
55 * @new: new entry to be added
56 * @head: list head to add it after
57 *
58 * Insert a new entry after the specified head.
59 * This is good for implementing stacks.
60 */
61 static inline void list_add(struct list_head *new, struct list_head *head)
62 {
63 __list_add(new, head, head->next);
64 }
65
66
67 /**
68 * list_add_tail - add a new entry
69 * @new: new entry to be added
70 * @head: list head to add it before
71 *
72 * Insert a new entry before the specified head.
73 * This is useful for implementing queues.
74 */
75 static inline void list_add_tail(struct list_head *new, struct list_head *head)
76 {
77 __list_add(new, head->prev, head);
78 }
79
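/*
 * A minimal sketch of the stack/queue distinction drawn in the two comments
 * above: list_add() pushes at the head, list_add_tail() enqueues at the tail.
 * The example_node structure and list name are hypothetical.
 */
struct example_node {
	int value;
	struct list_head link;
};

static LIST_HEAD(example_items);

/* Stack-like: the newest node becomes the first entry. */
static inline void example_push(struct example_node *n)
{
	list_add(&n->link, &example_items);
}

/* Queue-like: the newest node becomes the last entry. */
static inline void example_enqueue(struct example_node *n)
{
	list_add_tail(&n->link, &example_items);
}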
80 /*
81 * Delete a list entry by making the prev/next entries
82 * point to each other.
83 *
84 * This is only for internal list manipulation where we know
85 * the prev/next entries already!
86 */
87 static inline void __list_del(struct list_head * prev, struct list_head * next)
88 {
89 next->prev = prev;
90 WRITE_ONCE(prev->next, next);
91 }
92
93 /**
94 * list_del - deletes entry from list.
95 * @entry: the element to delete from the list.
96 * Note: list_empty() on entry does not return true after this, the entry is
97 * in an undefined state.
98 */
99 #ifndef CONFIG_DEBUG_LIST
100 static inline void __list_del_entry(struct list_head *entry)
101 {
102 __list_del(entry->prev, entry->next);
103 }
104
105 static inline void list_del(struct list_head *entry)
106 {
107 __list_del(entry->prev, entry->next);
108 entry->next = LIST_POISON1;
109 entry->prev = LIST_POISON2;
110 }
111 #else
112 extern void __list_del_entry(struct list_head *entry);
113 extern void list_del(struct list_head *entry);
114 #endif
115
116 /**
117 * list_replace - replace old entry by new one
118 * @old : the element to be replaced
119 * @new : the new element to insert
120 *
121 * If @old was empty, it will be overwritten.
122 */
123 static inline void list_replace(struct list_head *old,
124 struct list_head *new)
125 {
126 new->next = old->next;
127 new->next->prev = new;
128 new->prev = old->prev;
129 new->prev->next = new;
130 }
131
132 static inline void list_replace_init(struct list_head *old,
133 struct list_head *new)
134 {
135 list_replace(old, new);
136 INIT_LIST_HEAD(old);
137 }
138
139 /**
140 * list_del_init - deletes entry from list and reinitialize it.
141 * @entry: the element to delete from the list.
142 */
143 static inline void list_del_init(struct list_head *entry)
144 {
145 __list_del_entry(entry);
146 INIT_LIST_HEAD(entry);
147 }
148
149 /**
150 * list_move - delete from one list and add as another's head
151 * @list: the entry to move
152 * @head: the head that will precede our entry
153 */
154 static inline void list_move(struct list_head *list, struct list_head *head)
155 {
156 __list_del_entry(list);
157 list_add(list, head);
158 }
159
160 /**
161 * list_move_tail - delete from one list and add as another's tail
162 * @list: the entry to move
163 * @head: the head that will follow our entry
164 */
165 static inline void list_move_tail(struct list_head *list,
166 struct list_head *head)
167 {
168 __list_del_entry(list);
169 list_add_tail(list, head);
170 }
171
172 /**
173 * list_is_last - tests whether @list is the last entry in list @head
174 * @list: the entry to test
175 * @head: the head of the list
176 */
177 static inline int list_is_last(const struct list_head *list,
178 const struct list_head *head)
179 {
180 return list->next == head;
181 }
182
183 /**
184 * list_empty - tests whether a list is empty
185 * @head: the list to test.
186 */
187 static inline int list_empty(const struct list_head *head)
188 {
189 return READ_ONCE(head->next) == head;
190 }
191
192 /**
193 * list_empty_careful - tests whether a list is empty and not being modified
194 * @head: the list to test
195 *
196 * Description:
197 * tests whether a list is empty _and_ checks that no other CPU might be
198 * in the process of modifying either member (next or prev)
199 *
200 * NOTE: using list_empty_careful() without synchronization
201 * can only be safe if the only activity that can happen
202 * to the list entry is list_del_init(). E.g. it cannot be used
203 * if another CPU could re-list_add() it.
204 */
205 static inline int list_empty_careful(const struct list_head *head)
206 {
207 struct list_head *next = head->next;
208 return (next == head) && (next == head->prev);
209 }
210
211 /**
212 * list_rotate_left - rotate the list to the left
213 * @head: the head of the list
214 */
215 static inline void list_rotate_left(struct list_head *head)
216 {
217 struct list_head *first;
218
219 if (!list_empty(head)) {
220 first = head->next;
221 list_move_tail(first, head);
222 }
223 }
224
225 /**
226 * list_is_singular - tests whether a list has just one entry.
227 * @head: the list to test.
228 */
229 static inline int list_is_singular(const struct list_head *head)
230 {
231 return !list_empty(head) && (head->next == head->prev);
232 }
233
234 static inline void __list_cut_position(struct list_head *list,
235 struct list_head *head, struct list_head *entry)
236 {
237 struct list_head *new_first = entry->next;
238 list->next = head->next;
239 list->next->prev = list;
240 list->prev = entry;
241 entry->next = list;
242 head->next = new_first;
243 new_first->prev = head;
244 }
245
246 /**
247 * list_cut_position - cut a list into two
248 * @list: a new list to add all removed entries
249 * @head: a list with entries
250 * @entry: an entry within head, could be the head itself
251 * and if so we won't cut the list
252 *
253 * This helper moves the initial part of @head, up to and
254 * including @entry, from @head to @list. You should
255 * pass on @entry an element you know is on @head. @list
256 * should be an empty list or a list you do not care about
257 * losing its data.
258 *
259 */
260 static inline void list_cut_position(struct list_head *list,
261 struct list_head *head, struct list_head *entry)
262 {
263 if (list_empty(head))
264 return;
265 if (list_is_singular(head) &&
266 (head->next != entry && head != entry))
267 return;
268 if (entry == head)
269 INIT_LIST_HEAD(list);
270 else
271 __list_cut_position(list, head, entry);
272 }
273
274 static inline void __list_splice(const struct list_head *list,
275 struct list_head *prev,
276 struct list_head *next)
277 {
278 struct list_head *first = list->next;
279 struct list_head *last = list->prev;
280
281 first->prev = prev;
282 prev->next = first;
283
284 last->next = next;
285 next->prev = last;
286 }
287
288 /**
289 * list_splice - join two lists, this is designed for stacks
290 * @list: the new list to add.
291 * @head: the place to add it in the first list.
292 */
293 static inline void list_splice(const struct list_head *list,
294 struct list_head *head)
295 {
296 if (!list_empty(list))
297 __list_splice(list, head, head->next);
298 }
299
300 /**
301 * list_splice_tail - join two lists, each list being a queue
302 * @list: the new list to add.
303 * @head: the place to add it in the first list.
304 */
305 static inline void list_splice_tail(struct list_head *list,
306 struct list_head *head)
307 {
308 if (!list_empty(list))
309 __list_splice(list, head->prev, head);
310 }
311
312 /**
313 * list_splice_init - join two lists and reinitialise the emptied list.
314 * @list: the new list to add.
315 * @head: the place to add it in the first list.
316 *
317 * The list at @list is reinitialised
318 */
319 static inline void list_splice_init(struct list_head *list,
320 struct list_head *head)
321 {
322 if (!list_empty(list)) {
323 __list_splice(list, head, head->next);
324 INIT_LIST_HEAD(list);
325 }
326 }
327
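
A sketch of the common pattern this helper supports, assuming a hypothetical pending_list protected by pending_lock: the shared list is drained onto an on-stack list under the lock and walked afterwards without it.

#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(pending_list);
static DEFINE_SPINLOCK(pending_lock);

static void drain_pending(void)
{
	LIST_HEAD(local);				/* on-stack, initially empty */

	spin_lock(&pending_lock);
	list_splice_init(&pending_list, &local);	/* move everything, re-init source */
	spin_unlock(&pending_lock);

	/* "local" can now be processed without holding pending_lock */
}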
328 /**
329 * list_splice_tail_init - join two lists and reinitialise the emptied list
330 * @list: the new list to add.
331 * @head: the place to add it in the first list.
332 *
333 * Each of the lists is a queue.
334 * The list at @list is reinitialised
335 */
336 static inline void list_splice_tail_init(struct list_head *list,
337 struct list_head *head)
338 {
339 if (!list_empty(list)) {
340 __list_splice(list, head->prev, head);
341 INIT_LIST_HEAD(list);
342 }
343 }
344
345 /**
346 * list_entry - get the struct for this entry
347 * @ptr: the &struct list_head pointer.
348 * @type: the type of the struct this is embedded in.
349 * @member: the name of the list_head within the struct.
350 */
351 #define list_entry(ptr, type, member) \
352 container_of(ptr, type, member)
353
354 /**
355 * list_first_entry - get the first element from a list
356 * @ptr: the list head to take the element from.
357 * @type: the type of the struct this is embedded in.
358 * @member: the name of the list_head within the struct.
359 *
360 * Note, that list is expected to be not empty.
361 */
362 #define list_first_entry(ptr, type, member) \
363 list_entry((ptr)->next, type, member)
364
365 /**
366 * list_last_entry - get the last element from a list
367 * @ptr: the list head to take the element from.
368 * @type: the type of the struct this is embedded in.
369 * @member: the name of the list_head within the struct.
370 *
371 * Note, that list is expected to be not empty.
372 */
373 #define list_last_entry(ptr, type, member) \
374 list_entry((ptr)->prev, type, member)
375
376 /**
377 * list_first_entry_or_null - get the first element from a list
378 * @ptr: the list head to take the element from.
379 * @type: the type of the struct this is embedded in.
380 * @member: the name of the list_head within the struct.
381 *
382 * Note that if the list is empty, it returns NULL.
383 */
384 #define list_first_entry_or_null(ptr, type, member) \
385 (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
386
387 /**
388 * list_next_entry - get the next element in list
389 * @pos: the type * to cursor
390 * @member: the name of the list_head within the struct.
391 */
392 #define list_next_entry(pos, member) \
393 list_entry((pos)->member.next, typeof(*(pos)), member)
394
395 /**
396 * list_prev_entry - get the prev element in list
397 * @pos: the type * to cursor
398 * @member: the name of the list_head within the struct.
399 */
400 #define list_prev_entry(pos, member) \
401 list_entry((pos)->member.prev, typeof(*(pos)), member)
402
403 /**
404 * list_for_each - iterate over a list
405 * @pos: the &struct list_head to use as a loop cursor.
406 * @head: the head for your list.
407 */
408 #define list_for_each(pos, head) \
409 for (pos = (head)->next; pos != (head); pos = pos->next)
410
411 /**
412 * list_for_each_prev - iterate over a list backwards
413 * @pos: the &struct list_head to use as a loop cursor.
414 * @head: the head for your list.
415 */
416 #define list_for_each_prev(pos, head) \
417 for (pos = (head)->prev; pos != (head); pos = pos->prev)
418
419 /**
420 * list_for_each_safe - iterate over a list safe against removal of list entry
421 * @pos: the &struct list_head to use as a loop cursor.
422 * @n: another &struct list_head to use as temporary storage
423 * @head: the head for your list.
424 */
425 #define list_for_each_safe(pos, n, head) \
426 for (pos = (head)->next, n = pos->next; pos != (head); \
427 pos = n, n = pos->next)
428
429 /**
430 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
431 * @pos: the &struct list_head to use as a loop cursor.
432 * @n: another &struct list_head to use as temporary storage
433 * @head: the head for your list.
434 */
435 #define list_for_each_prev_safe(pos, n, head) \
436 for (pos = (head)->prev, n = pos->prev; \
437 pos != (head); \
438 pos = n, n = pos->prev)
439
440 /**
441 * list_for_each_entry - iterate over list of given type
442 * @pos: the type * to use as a loop cursor.
443 * @head: the head for your list.
444 * @member: the name of the list_head within the struct.
445 */
446 #define list_for_each_entry(pos, head, member) \
447 for (pos = list_first_entry(head, typeof(*pos), member); \
448 &pos->member != (head); \
449 pos = list_next_entry(pos, member))
450
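
A minimal iteration sketch, assuming a hypothetical struct item that embeds its list_head as the member "node":

#include <linux/list.h>

struct item {
	int value;
	struct list_head node;
};

static int sum_items(struct list_head *head)
{
	struct item *it;
	int sum = 0;

	list_for_each_entry(it, head, node)	/* "node" is the embedded list_head */
		sum += it->value;
	return sum;
}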
451 /**
452 * list_for_each_entry_reverse - iterate backwards over list of given type.
453 * @pos: the type * to use as a loop cursor.
454 * @head: the head for your list.
455 * @member: the name of the list_head within the struct.
456 */
457 #define list_for_each_entry_reverse(pos, head, member) \
458 for (pos = list_last_entry(head, typeof(*pos), member); \
459 &pos->member != (head); \
460 pos = list_prev_entry(pos, member))
461
462 /**
463 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
464 * @pos: the type * to use as a start point
465 * @head: the head of the list
466 * @member: the name of the list_head within the struct.
467 *
468 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
469 */
470 #define list_prepare_entry(pos, head, member) \
471 ((pos) ? : list_entry(head, typeof(*pos), member))
472
473 /**
474 * list_for_each_entry_continue - continue iteration over list of given type
475 * @pos: the type * to use as a loop cursor.
476 * @head: the head for your list.
477 * @member: the name of the list_head within the struct.
478 *
479 * Continue to iterate over list of given type, continuing after
480 * the current position.
481 */
482 #define list_for_each_entry_continue(pos, head, member) \
483 for (pos = list_next_entry(pos, member); \
484 &pos->member != (head); \
485 pos = list_next_entry(pos, member))
486
487 /**
488 * list_for_each_entry_continue_reverse - iterate backwards from the given point
489 * @pos: the type * to use as a loop cursor.
490 * @head: the head for your list.
491 * @member: the name of the list_head within the struct.
492 *
493 * Start to iterate over list of given type backwards, continuing after
494 * the current position.
495 */
496 #define list_for_each_entry_continue_reverse(pos, head, member) \
497 for (pos = list_prev_entry(pos, member); \
498 &pos->member != (head); \
499 pos = list_prev_entry(pos, member))
500
501 /**
502 * list_for_each_entry_from - iterate over list of given type from the current point
503 * @pos: the type * to use as a loop cursor.
504 * @head: the head for your list.
505 * @member: the name of the list_head within the struct.
506 *
507 * Iterate over list of given type, continuing from current position.
508 */
509 #define list_for_each_entry_from(pos, head, member) \
510 for (; &pos->member != (head); \
511 pos = list_next_entry(pos, member))
512
513 /**
514 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
515 * @pos: the type * to use as a loop cursor.
516 * @n: another type * to use as temporary storage
517 * @head: the head for your list.
518 * @member: the name of the list_head within the struct.
519 */
520 #define list_for_each_entry_safe(pos, n, head, member) \
521 for (pos = list_first_entry(head, typeof(*pos), member), \
522 n = list_next_entry(pos, member); \
523 &pos->member != (head); \
524 pos = n, n = list_next_entry(n, member))
525
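
A sketch of removal during iteration, again with a hypothetical struct item; the extra cursor keeps the loop valid while entries are freed:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	int value;
	struct list_head node;
};

static void free_items(struct list_head *head)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, head, node) {
		list_del(&it->node);	/* safe: tmp already holds the next entry */
		kfree(it);
	}
}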
526 /**
527 * list_for_each_entry_safe_continue - continue list iteration safe against removal
528 * @pos: the type * to use as a loop cursor.
529 * @n: another type * to use as temporary storage
530 * @head: the head for your list.
531 * @member: the name of the list_head within the struct.
532 *
533 * Iterate over list of given type, continuing after current point,
534 * safe against removal of list entry.
535 */
536 #define list_for_each_entry_safe_continue(pos, n, head, member) \
537 for (pos = list_next_entry(pos, member), \
538 n = list_next_entry(pos, member); \
539 &pos->member != (head); \
540 pos = n, n = list_next_entry(n, member))
541
542 /**
543 * list_for_each_entry_safe_from - iterate over list from current point safe against removal
544 * @pos: the type * to use as a loop cursor.
545 * @n: another type * to use as temporary storage
546 * @head: the head for your list.
547 * @member: the name of the list_head within the struct.
548 *
549 * Iterate over list of given type from current point, safe against
550 * removal of list entry.
551 */
552 #define list_for_each_entry_safe_from(pos, n, head, member) \
553 for (n = list_next_entry(pos, member); \
554 &pos->member != (head); \
555 pos = n, n = list_next_entry(n, member))
556
557 /**
558 * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
559 * @pos: the type * to use as a loop cursor.
560 * @n: another type * to use as temporary storage
561 * @head: the head for your list.
562 * @member: the name of the list_head within the struct.
563 *
564 * Iterate backwards over list of given type, safe against removal
565 * of list entry.
566 */
567 #define list_for_each_entry_safe_reverse(pos, n, head, member) \
568 for (pos = list_last_entry(head, typeof(*pos), member), \
569 n = list_prev_entry(pos, member); \
570 &pos->member != (head); \
571 pos = n, n = list_prev_entry(n, member))
572
573 /**
574 * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
575 * @pos: the loop cursor used in the list_for_each_entry_safe loop
576 * @n: temporary storage used in list_for_each_entry_safe
577 * @member: the name of the list_head within the struct.
578 *
579 * list_safe_reset_next is not safe to use in general if the list may be
580 * modified concurrently (eg. the lock is dropped in the loop body). An
581 * exception to this is if the cursor element (pos) is pinned in the list,
582 * and list_safe_reset_next is called after re-taking the lock and before
583 * completing the current iteration of the loop body.
584 */
585 #define list_safe_reset_next(pos, n, member) \
586 n = list_next_entry(pos, member)
587
588 /*
589 * Doubly linked lists with a single pointer list head.
590 * Mostly useful for hash tables where the two pointer list head is
591 * too wasteful.
592 * You lose the ability to access the tail in O(1).
593 */
594
595 #define HLIST_HEAD_INIT { .first = NULL }
596 #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
597 #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
598 static inline void INIT_HLIST_NODE(struct hlist_node *h)
599 {
600 h->next = NULL;
601 h->pprev = NULL;
602 }
603
604 static inline int hlist_unhashed(const struct hlist_node *h)
605 {
606 return !h->pprev;
607 }
608
609 static inline int hlist_empty(const struct hlist_head *h)
610 {
611 return !READ_ONCE(h->first);
612 }
613
614 static inline void __hlist_del(struct hlist_node *n)
615 {
616 struct hlist_node *next = n->next;
617 struct hlist_node **pprev = n->pprev;
618
619 WRITE_ONCE(*pprev, next);
620 if (next)
621 next->pprev = pprev;
622 }
623
624 static inline void hlist_del(struct hlist_node *n)
625 {
626 __hlist_del(n);
627 n->next = LIST_POISON1;
628 n->pprev = LIST_POISON2;
629 }
630
631 static inline void hlist_del_init(struct hlist_node *n)
632 {
633 if (!hlist_unhashed(n)) {
634 __hlist_del(n);
635 INIT_HLIST_NODE(n);
636 }
637 }
638
639 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
640 {
641 struct hlist_node *first = h->first;
642 n->next = first;
643 if (first)
644 first->pprev = &n->next;
645 WRITE_ONCE(h->first, n);
646 n->pprev = &h->first;
647 }
648
649 /* next must be != NULL */
650 static inline void hlist_add_before(struct hlist_node *n,
651 struct hlist_node *next)
652 {
653 n->pprev = next->pprev;
654 n->next = next;
655 next->pprev = &n->next;
656 WRITE_ONCE(*(n->pprev), n);
657 }
658
659 static inline void hlist_add_behind(struct hlist_node *n,
660 struct hlist_node *prev)
661 {
662 n->next = prev->next;
663 WRITE_ONCE(prev->next, n);
664 n->pprev = &prev->next;
665
666 if (n->next)
667 n->next->pprev = &n->next;
668 }
669
670 /* after that we'll appear to be on some hlist and hlist_del will work */
671 static inline void hlist_add_fake(struct hlist_node *n)
672 {
673 n->pprev = &n->next;
674 }
675
676 static inline bool hlist_fake(struct hlist_node *h)
677 {
678 return h->pprev == &h->next;
679 }
680
681 /*
682 * Check whether the node is the only node of the head without
683 * accessing head:
684 */
685 static inline bool
686 hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
687 {
688 return !n->next && n->pprev == &h->first;
689 }
690
691 /*
692 * Move a list from one list head to another. Fixup the pprev
693 * reference of the first entry if it exists.
694 */
695 static inline void hlist_move_list(struct hlist_head *old,
696 struct hlist_head *new)
697 {
698 new->first = old->first;
699 if (new->first)
700 new->first->pprev = &new->first;
701 old->first = NULL;
702 }
703
704 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
705
706 #define hlist_for_each(pos, head) \
707 for (pos = (head)->first; pos ; pos = pos->next)
708
709 #define hlist_for_each_safe(pos, n, head) \
710 for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
711 pos = n)
712
713 #define hlist_entry_safe(ptr, type, member) \
714 ({ typeof(ptr) ____ptr = (ptr); \
715 ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
716 })
717
718 /**
719 * hlist_for_each_entry - iterate over list of given type
720 * @pos: the type * to use as a loop cursor.
721 * @head: the head for your list.
722 * @member: the name of the hlist_node within the struct.
723 */
724 #define hlist_for_each_entry(pos, head, member) \
725 for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
726 pos; \
727 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
728
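
A sketch of the intended use as hash-table buckets; the table size, struct entry and helper names are hypothetical:

#include <linux/list.h>
#include <linux/types.h>

#define MY_TABLE_SIZE 16			/* hypothetical bucket count */

struct entry {
	u32 key;
	struct hlist_node hnode;
};

static struct hlist_head my_table[MY_TABLE_SIZE];	/* zeroed, i.e. all buckets empty */

static void my_insert(struct entry *e)
{
	hlist_add_head(&e->hnode, &my_table[e->key % MY_TABLE_SIZE]);
}

static struct entry *my_lookup(u32 key)
{
	struct entry *e;

	hlist_for_each_entry(e, &my_table[key % MY_TABLE_SIZE], hnode)
		if (e->key == key)
			return e;
	return NULL;
}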
729 /**
730 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
731 * @pos: the type * to use as a loop cursor.
732 * @member: the name of the hlist_node within the struct.
733 */
734 #define hlist_for_each_entry_continue(pos, member) \
735 for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
736 pos; \
737 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
738
739 /**
740 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
741 * @pos: the type * to use as a loop cursor.
742 * @member: the name of the hlist_node within the struct.
743 */
744 #define hlist_for_each_entry_from(pos, member) \
745 for (; pos; \
746 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
747
748 /**
749 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
750 * @pos: the type * to use as a loop cursor.
751 * @n: another &struct hlist_node to use as temporary storage
752 * @head: the head for your list.
753 * @member: the name of the hlist_node within the struct.
754 */
755 #define hlist_for_each_entry_safe(pos, n, head, member) \
756 for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
757 pos && ({ n = pos->member.next; 1; }); \
758 pos = hlist_entry_safe(n, typeof(*pos), member))
759
760 #endif
1 /*
2 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
3 *
4 * (C) SGI 2006, Christoph Lameter
5 * Cleaned up and restructured to ease the addition of alternative
6 * implementations of SLAB allocators.
7 * (C) Linux Foundation 2008-2013
8 * Unified interface for all slab allocators
9 */
10
11 #ifndef _LINUX_SLAB_H
12 #define _LINUX_SLAB_H
13
14 #include <linux/gfp.h>
15 #include <linux/types.h>
16 #include <linux/workqueue.h>
17
18
19 /*
20 * Flags to pass to kmem_cache_create().
21 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
22 */
23 #define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
24 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
25 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
26 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
27 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
28 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
29 #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
30 /*
31 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
32 *
33 * This delays freeing the SLAB page by a grace period, it does _NOT_
34 * delay object freeing. This means that if you do kmem_cache_free()
35 * that memory location is free to be reused at any time. Thus it may
36 * be possible to see another object there in the same RCU grace period.
37 *
38 * This feature only ensures the memory location backing the object
39 * stays valid, the trick to using this is relying on an independent
40 * object validation pass. Something like:
41 *
42 * rcu_read_lock()
43 * again:
44 * obj = lockless_lookup(key);
45 * if (obj) {
46 * if (!try_get_ref(obj)) // might fail for free objects
47 * goto again;
48 *
49 * if (obj->key != key) { // not the object we expected
50 * put_ref(obj);
51 * goto again;
52 * }
53 * }
54 * rcu_read_unlock();
55 *
56 * This is useful if we need to approach a kernel structure obliquely,
57 * from its address obtained without the usual locking. We can lock
58 * the structure to stabilize it and check it's still at the given address,
59 * only if we can be sure that the memory has not been meanwhile reused
60 * for some other kind of object (which our subsystem's lock might corrupt).
61 *
62 * rcu_read_lock before reading the address, then rcu_read_unlock after
63 * taking the spinlock within the structure expected at that address.
64 */
65 #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
66 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
67 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
68
69 /* Flag to prevent checks on free */
70 #ifdef CONFIG_DEBUG_OBJECTS
71 # define SLAB_DEBUG_OBJECTS 0x00400000UL
72 #else
73 # define SLAB_DEBUG_OBJECTS 0x00000000UL
74 #endif
75
76 #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
77
78 /* Don't track use of uninitialized memory */
79 #ifdef CONFIG_KMEMCHECK
80 # define SLAB_NOTRACK 0x01000000UL
81 #else
82 # define SLAB_NOTRACK 0x00000000UL
83 #endif
84 #ifdef CONFIG_FAILSLAB
85 # define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */
86 #else
87 # define SLAB_FAILSLAB 0x00000000UL
88 #endif
89 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
90 # define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */
91 #else
92 # define SLAB_ACCOUNT 0x00000000UL
93 #endif
94
95 #ifdef CONFIG_KASAN
96 #define SLAB_KASAN 0x08000000UL
97 #else
98 #define SLAB_KASAN 0x00000000UL
99 #endif
100
101 /* The following flags affect the page allocator grouping pages by mobility */
102 #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
103 #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
104 /*
105 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
106 *
107 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
108 *
109 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
110 * Both make kfree a no-op.
111 */
112 #define ZERO_SIZE_PTR ((void *)16)
113
114 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
115 (unsigned long)ZERO_SIZE_PTR)
116
117 #include <linux/kmemleak.h>
118 #include <linux/kasan.h>
119
120 struct mem_cgroup;
121 /*
122 * struct kmem_cache related prototypes
123 */
124 void __init kmem_cache_init(void);
125 bool slab_is_available(void);
126
127 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
128 unsigned long,
129 void (*)(void *));
130 void kmem_cache_destroy(struct kmem_cache *);
131 int kmem_cache_shrink(struct kmem_cache *);
132
133 void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
134 void memcg_deactivate_kmem_caches(struct mem_cgroup *);
135 void memcg_destroy_kmem_caches(struct mem_cgroup *);
136
137 /*
138 * Please use this macro to create slab caches. Simply specify the
139 * name of the structure and maybe some flags that are listed above.
140 *
141 * The alignment of the struct determines object alignment. If you
142 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
143 * then the objects will be properly aligned in SMP configurations.
144 */
145 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
146 sizeof(struct __struct), __alignof__(struct __struct),\
147 (__flags), NULL)
148
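
A sketch of the intended use, assuming a hypothetical struct foo and foo_cache:

#include <linux/slab.h>
#include <linux/errno.h>

struct foo {
	int state;
};

static struct kmem_cache *foo_cache;

static int foo_cache_init(void)
{
	foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
	return foo_cache ? 0 : -ENOMEM;
}

static struct foo *foo_alloc(gfp_t gfp)
{
	return kmem_cache_zalloc(foo_cache, gfp);	/* zeroed object from the cache */
}

static void foo_free(struct foo *f)
{
	kmem_cache_free(foo_cache, f);
}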
149 /*
150 * Common kmalloc functions provided by all allocators
151 */
152 void * __must_check __krealloc(const void *, size_t, gfp_t);
153 void * __must_check krealloc(const void *, size_t, gfp_t);
154 void kfree(const void *);
155 void kzfree(const void *);
156 size_t ksize(const void *);
157
158 /*
159 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
160 * alignment larger than the alignment of a 64-bit integer.
161 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
162 */
163 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
164 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
165 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
166 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
167 #else
168 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
169 #endif
170
171 /*
172 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
173 * Intended for arches that get misalignment faults even for 64 bit integer
174 * aligned buffers.
175 */
176 #ifndef ARCH_SLAB_MINALIGN
177 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
178 #endif
179
180 /*
181 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
182 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
183 * aligned pointers.
184 */
185 #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
186 #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
187 #define __assume_page_alignment __assume_aligned(PAGE_SIZE)
188
189 /*
190 * Kmalloc array related definitions
191 */
192
193 #ifdef CONFIG_SLAB
194 /*
195 * The largest kmalloc size supported by the SLAB allocators is
196 * 32 megabyte (2^25) or the maximum allocatable page order if that is
197 * less than 32 MB.
198 *
199 * WARNING: It's not easy to increase this value since the allocators have
200 * to do various tricks to work around compiler limitations in order to
201 * ensure proper constant folding.
202 */
203 #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
204 (MAX_ORDER + PAGE_SHIFT - 1) : 25)
205 #define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
206 #ifndef KMALLOC_SHIFT_LOW
207 #define KMALLOC_SHIFT_LOW 5
208 #endif
209 #endif
210
211 #ifdef CONFIG_SLUB
212 /*
213 * SLUB directly allocates requests fitting into an order-1 page
214 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
215 */
216 #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
217 #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
218 #ifndef KMALLOC_SHIFT_LOW
219 #define KMALLOC_SHIFT_LOW 3
220 #endif
221 #endif
222
223 #ifdef CONFIG_SLOB
224 /*
225 * SLOB passes all requests larger than one page to the page allocator.
226 * No kmalloc array is necessary since objects of different sizes can
227 * be allocated from the same page.
228 */
229 #define KMALLOC_SHIFT_HIGH PAGE_SHIFT
230 #define KMALLOC_SHIFT_MAX 30
231 #ifndef KMALLOC_SHIFT_LOW
232 #define KMALLOC_SHIFT_LOW 3
233 #endif
234 #endif
235
236 /* Maximum allocatable size */
237 #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
238 /* Maximum size for which we actually use a slab cache */
239 #define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
240 /* Maximum order allocatable via the slab allocator */
241 #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
242
243 /*
244 * Kmalloc subsystem.
245 */
246 #ifndef KMALLOC_MIN_SIZE
247 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
248 #endif
249
250 /*
251 * This restriction comes from the byte-sized index implementation.
252 * The page size is normally 2^12 bytes, so if we want to use a
253 * byte-sized index that can represent 2^8 entries, the object size
254 * must be greater than or equal to 2^12 / 2^8 = 2^4 = 16.
255 * If the minimum kmalloc size is less than 16, we use it as the minimum
256 * object size and give up on using a byte-sized index.
257 */
258 #define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
259 (KMALLOC_MIN_SIZE) : 16)
260
261 #ifndef CONFIG_SLOB
262 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
263 #ifdef CONFIG_ZONE_DMA
264 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
265 #endif
266
267 /*
268 * Figure out which kmalloc slab an allocation of a certain size
269 * belongs to.
270 * 0 = zero alloc
271 * 1 = 65 .. 96 bytes
272 * 2 = 129 .. 192 bytes
273 * n = 2^(n-1)+1 .. 2^n
274 */
275 static __always_inline int kmalloc_index(size_t size)
276 {
277 if (!size)
278 return 0;
279
280 if (size <= KMALLOC_MIN_SIZE)
281 return KMALLOC_SHIFT_LOW;
282
283 if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
284 return 1;
285 if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
286 return 2;
287 if (size <= 8) return 3;
288 if (size <= 16) return 4;
289 if (size <= 32) return 5;
290 if (size <= 64) return 6;
291 if (size <= 128) return 7;
292 if (size <= 256) return 8;
293 if (size <= 512) return 9;
294 if (size <= 1024) return 10;
295 if (size <= 2 * 1024) return 11;
296 if (size <= 4 * 1024) return 12;
297 if (size <= 8 * 1024) return 13;
298 if (size <= 16 * 1024) return 14;
299 if (size <= 32 * 1024) return 15;
300 if (size <= 64 * 1024) return 16;
301 if (size <= 128 * 1024) return 17;
302 if (size <= 256 * 1024) return 18;
303 if (size <= 512 * 1024) return 19;
304 if (size <= 1024 * 1024) return 20;
305 if (size <= 2 * 1024 * 1024) return 21;
306 if (size <= 4 * 1024 * 1024) return 22;
307 if (size <= 8 * 1024 * 1024) return 23;
308 if (size <= 16 * 1024 * 1024) return 24;
309 if (size <= 32 * 1024 * 1024) return 25;
310 if (size <= 64 * 1024 * 1024) return 26;
311 BUG();
312
313 /* Will never be reached. Needed because the compiler may complain */
314 return -1;
315 }
316 #endif /* !CONFIG_SLOB */
317
318 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
319 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
320 void kmem_cache_free(struct kmem_cache *, void *);
321
322 /*
323 * Bulk allocation and freeing operations. These are accelerated in an
324 * allocator specific way to avoid taking locks repeatedly or building
325 * metadata structures unnecessarily.
326 *
327 * Note that interrupts must be enabled when calling these functions.
328 */
329 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
330 int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
331
332 /*
333 * Caller must not use kfree_bulk() on memory not originally allocated
334 * by kmalloc(), because the SLOB allocator cannot handle this.
335 */
336 static __always_inline void kfree_bulk(size_t size, void **p)
337 {
338 kmem_cache_free_bulk(NULL, size, p);
339 }
340
341 #ifdef CONFIG_NUMA
342 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
343 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
344 #else
345 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
346 {
347 return __kmalloc(size, flags);
348 }
349
350 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
351 {
352 return kmem_cache_alloc(s, flags);
353 }
354 #endif
355
356 #ifdef CONFIG_TRACING
357 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
358
359 #ifdef CONFIG_NUMA
360 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
361 gfp_t gfpflags,
362 int node, size_t size) __assume_slab_alignment __malloc;
363 #else
364 static __always_inline void *
365 kmem_cache_alloc_node_trace(struct kmem_cache *s,
366 gfp_t gfpflags,
367 int node, size_t size)
368 {
369 return kmem_cache_alloc_trace(s, gfpflags, size);
370 }
371 #endif /* CONFIG_NUMA */
372
373 #else /* CONFIG_TRACING */
374 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
375 gfp_t flags, size_t size)
376 {
377 void *ret = kmem_cache_alloc(s, flags);
378
379 kasan_kmalloc(s, ret, size, flags);
380 return ret;
381 }
382
383 static __always_inline void *
384 kmem_cache_alloc_node_trace(struct kmem_cache *s,
385 gfp_t gfpflags,
386 int node, size_t size)
387 {
388 void *ret = kmem_cache_alloc_node(s, gfpflags, node);
389
390 kasan_kmalloc(s, ret, size, gfpflags);
391 return ret;
392 }
393 #endif /* CONFIG_TRACING */
394
395 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
396
397 #ifdef CONFIG_TRACING
398 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
399 #else
400 static __always_inline void *
401 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
402 {
403 return kmalloc_order(size, flags, order);
404 }
405 #endif
406
407 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
408 {
409 unsigned int order = get_order(size);
410 return kmalloc_order_trace(size, flags, order);
411 }
412
413 /**
414 * kmalloc - allocate memory
415 * @size: how many bytes of memory are required.
416 * @flags: the type of memory to allocate.
417 *
418 * kmalloc is the normal method of allocating memory
419 * for objects smaller than page size in the kernel.
420 *
421 * The @flags argument may be one of:
422 *
423 * %GFP_USER - Allocate memory on behalf of user. May sleep.
424 *
425 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
426 *
427 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
428 * For example, use this inside interrupt handlers.
429 *
430 * %GFP_HIGHUSER - Allocate pages from high memory.
431 *
432 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
433 *
434 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
435 *
436 * %GFP_NOWAIT - Allocation will not sleep.
437 *
438 * %__GFP_THISNODE - Allocate node-local memory only.
439 *
440 * %GFP_DMA - Allocation suitable for DMA.
441 * Should only be used for kmalloc() caches. Otherwise, use a
442 * slab created with SLAB_DMA.
443 *
444 * Also it is possible to set different flags by OR'ing
445 * in one or more of the following additional @flags:
446 *
447 * %__GFP_COLD - Request cache-cold pages instead of
448 * trying to return cache-warm pages.
449 *
450 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
451 *
452 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
453 * (think twice before using).
454 *
455 * %__GFP_NORETRY - If memory is not immediately available,
456 * then give up at once.
457 *
458 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
459 *
460 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
461 *
462 * There are other flags available as well, but these are not intended
463 * for general use, and so are not documented here. For a full list of
464 * potential flags, always refer to linux/gfp.h.
465 */
466 static __always_inline void *kmalloc(size_t size, gfp_t flags)
467 {
468 if (__builtin_constant_p(size)) {
469 if (size > KMALLOC_MAX_CACHE_SIZE)
470 return kmalloc_large(size, flags);
471 #ifndef CONFIG_SLOB
472 if (!(flags & GFP_DMA)) {
473 int index = kmalloc_index(size);
474
475 if (!index)
476 return ZERO_SIZE_PTR;
477
478 return kmem_cache_alloc_trace(kmalloc_caches[index],
479 flags, size);
480 }
481 #endif
482 }
483 return __kmalloc(size, flags);
484 }
485
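
A minimal usage sketch with a hypothetical helper copy_blob():

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

static int copy_blob(const void *src, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);	/* may sleep; use GFP_ATOMIC in atomic context */

	if (!buf)
		return -ENOMEM;

	memcpy(buf, src, len);
	/* ... hand buf off or use it here ... */
	kfree(buf);
	return 0;
}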
486 /*
487 * Determine size used for the nth kmalloc cache.
488 * return size or 0 if a kmalloc cache for that
489 * size does not exist
490 */
491 static __always_inline int kmalloc_size(int n)
492 {
493 #ifndef CONFIG_SLOB
494 if (n > 2)
495 return 1 << n;
496
497 if (n == 1 && KMALLOC_MIN_SIZE <= 32)
498 return 96;
499
500 if (n == 2 && KMALLOC_MIN_SIZE <= 64)
501 return 192;
502 #endif
503 return 0;
504 }
505
506 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
507 {
508 #ifndef CONFIG_SLOB
509 if (__builtin_constant_p(size) &&
510 size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
511 int i = kmalloc_index(size);
512
513 if (!i)
514 return ZERO_SIZE_PTR;
515
516 return kmem_cache_alloc_node_trace(kmalloc_caches[i],
517 flags, node, size);
518 }
519 #endif
520 return __kmalloc_node(size, flags, node);
521 }
522
523 struct memcg_cache_array {
524 struct rcu_head rcu;
525 struct kmem_cache *entries[0];
526 };
527
528 /*
529 * This is the main placeholder for memcg-related information in kmem caches.
530 * Both the root cache and the child caches will have it. For the root cache,
531 * this will hold a dynamically allocated array large enough to hold
532 * information about the currently limited memcgs in the system. To allow the
533 * array to be accessed without taking any locks, on relocation we free the old
534 * version only after a grace period.
535 *
536 * Child caches will hold extra metadata needed for their operation. Fields are:
537 *
538 * @memcg: pointer to the memcg this cache belongs to
539 * @root_cache: pointer to the global, root cache, this cache was derived from
540 *
541 * Both root and child caches of the same kind are linked into a list chained
542 * through @list.
543 */
544 struct memcg_cache_params {
545 bool is_root_cache;
546 struct list_head list;
547 union {
548 struct memcg_cache_array __rcu *memcg_caches;
549 struct {
550 struct mem_cgroup *memcg;
551 struct kmem_cache *root_cache;
552 };
553 };
554 };
555
556 int memcg_update_all_caches(int num_memcgs);
557
558 /**
559 * kmalloc_array - allocate memory for an array.
560 * @n: number of elements.
561 * @size: element size.
562 * @flags: the type of memory to allocate (see kmalloc).
563 */
564 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
565 {
566 if (size != 0 && n > SIZE_MAX / size)
567 return NULL;
568 if (__builtin_constant_p(n) && __builtin_constant_p(size))
569 return kmalloc(n * size, flags);
570 return __kmalloc(n * size, flags);
571 }
572
573 /**
574 * kcalloc - allocate memory for an array. The memory is set to zero.
575 * @n: number of elements.
576 * @size: element size.
577 * @flags: the type of memory to allocate (see kmalloc).
578 */
579 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
580 {
581 return kmalloc_array(n, size, flags | __GFP_ZERO);
582 }
583
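
A short sketch, with a hypothetical alloc_counters() helper, of why kmalloc_array()/kcalloc() are preferred over an open-coded n * size:

#include <linux/slab.h>
#include <linux/types.h>

static u32 *alloc_counters(size_t n)
{
	/* kcalloc() rejects n * sizeof(u32) overflow and returns zeroed memory */
	return kcalloc(n, sizeof(u32), GFP_KERNEL);
}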
584 /*
585 * kmalloc_track_caller is a special version of kmalloc that records the
586 * calling function of the routine calling it for slab leak tracking instead
587 * of just the calling function (confusing, eh?).
588 * It's useful when the call to kmalloc comes from a widely-used standard
589 * allocator where we care about the real place the memory allocation
590 * request comes from.
591 */
592 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
593 #define kmalloc_track_caller(size, flags) \
594 __kmalloc_track_caller(size, flags, _RET_IP_)
595
596 #ifdef CONFIG_NUMA
597 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
598 #define kmalloc_node_track_caller(size, flags, node) \
599 __kmalloc_node_track_caller(size, flags, node, \
600 _RET_IP_)
601
602 #else /* CONFIG_NUMA */
603
604 #define kmalloc_node_track_caller(size, flags, node) \
605 kmalloc_track_caller(size, flags)
606
607 #endif /* CONFIG_NUMA */
608
609 /*
610 * Shortcuts
611 */
612 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
613 {
614 return kmem_cache_alloc(k, flags | __GFP_ZERO);
615 }
616
617 /**
618 * kzalloc - allocate memory. The memory is set to zero.
619 * @size: how many bytes of memory are required.
620 * @flags: the type of memory to allocate (see kmalloc).
621 */
622 static inline void *kzalloc(size_t size, gfp_t flags)
623 {
624 return kmalloc(size, flags | __GFP_ZERO);
625 }
626
627 /**
628 * kzalloc_node - allocate zeroed memory from a particular memory node.
629 * @size: how many bytes of memory are required.
630 * @flags: the type of memory to allocate (see kmalloc).
631 * @node: memory node from which to allocate
632 */
633 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
634 {
635 return kmalloc_node(size, flags | __GFP_ZERO, node);
636 }
637
638 unsigned int kmem_cache_size(struct kmem_cache *s);
639 void __init kmem_cache_init_late(void);
640
641 #endif /* _LINUX_SLAB_H */
1 #ifndef __LINUX_SPINLOCK_H
2 #define __LINUX_SPINLOCK_H
3
4 /*
5 * include/linux/spinlock.h - generic spinlock/rwlock declarations
6 *
7 * here's the role of the various spinlock/rwlock related include files:
8 *
9 * on SMP builds:
10 *
11 * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
12 * initializers
13 *
14 * linux/spinlock_types.h:
15 * defines the generic type and initializers
16 *
17 * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
18 * implementations, mostly inline assembly code
19 *
20 * (also included on UP-debug builds:)
21 *
22 * linux/spinlock_api_smp.h:
23 * contains the prototypes for the _spin_*() APIs.
24 *
25 * linux/spinlock.h: builds the final spin_*() APIs.
26 *
27 * on UP builds:
28 *
29 * linux/spinlock_type_up.h:
30 * contains the generic, simplified UP spinlock type.
31 * (which is an empty structure on non-debug builds)
32 *
33 * linux/spinlock_types.h:
34 * defines the generic type and initializers
35 *
36 * linux/spinlock_up.h:
37 * contains the arch_spin_*()/etc. version of UP
38 * builds. (which are NOPs on non-debug, non-preempt
39 * builds)
40 *
41 * (included on UP-non-debug builds:)
42 *
43 * linux/spinlock_api_up.h:
44 * builds the _spin_*() APIs.
45 *
46 * linux/spinlock.h: builds the final spin_*() APIs.
47 */
48
49 #include <linux/typecheck.h>
50 #include <linux/preempt.h>
51 #include <linux/linkage.h>
52 #include <linux/compiler.h>
53 #include <linux/irqflags.h>
54 #include <linux/thread_info.h>
55 #include <linux/kernel.h>
56 #include <linux/stringify.h>
57 #include <linux/bottom_half.h>
58 #include <asm/barrier.h>
59
60
61 /*
62 * Must define these before including other files, inline functions need them
63 */
64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
65
66 #define LOCK_SECTION_START(extra) \
67 ".subsection 1\n\t" \
68 extra \
69 ".ifndef " LOCK_SECTION_NAME "\n\t" \
70 LOCK_SECTION_NAME ":\n\t" \
71 ".endif\n"
72
73 #define LOCK_SECTION_END \
74 ".previous\n\t"
75
76 #define __lockfunc __attribute__((section(".spinlock.text")))
77
78 /*
79 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
80 */
81 #include <linux/spinlock_types.h>
82
83 /*
84 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
85 */
86 #ifdef CONFIG_SMP
87 # include <asm/spinlock.h>
88 #else
89 # include <linux/spinlock_up.h>
90 #endif
91
92 #ifdef CONFIG_DEBUG_SPINLOCK
93 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
94 struct lock_class_key *key);
95 # define raw_spin_lock_init(lock) \
96 do { \
97 static struct lock_class_key __key; \
98 \
99 __raw_spin_lock_init((lock), #lock, &__key); \
100 } while (0)
101
102 #else
103 # define raw_spin_lock_init(lock) \
104 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
105 #endif
106
107 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
108
109 #ifdef CONFIG_GENERIC_LOCKBREAK
110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
111 #else
112
113 #ifdef arch_spin_is_contended
114 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
115 #else
116 #define raw_spin_is_contended(lock) (((void)(lock), 0))
117 #endif /*arch_spin_is_contended*/
118 #endif
119
120 /*
121 * Despite its name, it doesn't necessarily have to be a full barrier.
122 * It should only guarantee that a STORE before the critical section
123 * can not be reordered with LOADs and STOREs inside this section.
124 * spin_lock() is the one-way barrier, this LOAD can not escape out
125 * of the region. So the default implementation simply ensures that
126 * a STORE can not move into the critical section, smp_wmb() should
127 * serialize it with another STORE done by spin_lock().
128 */
129 #ifndef smp_mb__before_spinlock
130 #define smp_mb__before_spinlock() smp_wmb()
131 #endif
132
133 /**
134 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
135 * @lock: the spinlock in question.
136 */
137 #define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
138
139 #ifdef CONFIG_DEBUG_SPINLOCK
140 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
141 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
142 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
143 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
144 #else
145 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
146 {
147 __acquire(lock);
148 arch_spin_lock(&lock->raw_lock);
149 }
150
151 static inline void
152 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
153 {
154 __acquire(lock);
155 arch_spin_lock_flags(&lock->raw_lock, *flags);
156 }
157
158 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
159 {
160 return arch_spin_trylock(&(lock)->raw_lock);
161 }
162
163 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
164 {
165 arch_spin_unlock(&lock->raw_lock);
166 __release(lock);
167 }
168 #endif
169
170 /*
171 * Define the various spin_lock methods. Note we define these
172 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
173 * various methods are defined as nops in the case they are not
174 * required.
175 */
176 #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
177
178 #define raw_spin_lock(lock) _raw_spin_lock(lock)
179
180 #ifdef CONFIG_DEBUG_LOCK_ALLOC
181 # define raw_spin_lock_nested(lock, subclass) \
182 _raw_spin_lock_nested(lock, subclass)
183 # define raw_spin_lock_bh_nested(lock, subclass) \
184 _raw_spin_lock_bh_nested(lock, subclass)
185
186 # define raw_spin_lock_nest_lock(lock, nest_lock) \
187 do { \
188 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
189 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
190 } while (0)
191 #else
192 /*
193 * Always evaluate the 'subclass' argument to avoid that the compiler
194 * warns about set-but-not-used variables when building with
195 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
196 */
197 # define raw_spin_lock_nested(lock, subclass) \
198 _raw_spin_lock(((void)(subclass), (lock)))
199 # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
200 # define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
201 #endif
202
203 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
204
205 #define raw_spin_lock_irqsave(lock, flags) \
206 do { \
207 typecheck(unsigned long, flags); \
208 flags = _raw_spin_lock_irqsave(lock); \
209 } while (0)
210
211 #ifdef CONFIG_DEBUG_LOCK_ALLOC
212 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
213 do { \
214 typecheck(unsigned long, flags); \
215 flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
216 } while (0)
217 #else
218 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
219 do { \
220 typecheck(unsigned long, flags); \
221 flags = _raw_spin_lock_irqsave(lock); \
222 } while (0)
223 #endif
224
225 #else
226
227 #define raw_spin_lock_irqsave(lock, flags) \
228 do { \
229 typecheck(unsigned long, flags); \
230 _raw_spin_lock_irqsave(lock, flags); \
231 } while (0)
232
233 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
234 raw_spin_lock_irqsave(lock, flags)
235
236 #endif
237
238 #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
239 #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
240 #define raw_spin_unlock(lock) _raw_spin_unlock(lock)
241 #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
242
243 #define raw_spin_unlock_irqrestore(lock, flags) \
244 do { \
245 typecheck(unsigned long, flags); \
246 _raw_spin_unlock_irqrestore(lock, flags); \
247 } while (0)
248 #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
249
250 #define raw_spin_trylock_bh(lock) \
251 __cond_lock(lock, _raw_spin_trylock_bh(lock))
252
253 #define raw_spin_trylock_irq(lock) \
254 ({ \
255 local_irq_disable(); \
256 raw_spin_trylock(lock) ? \
257 1 : ({ local_irq_enable(); 0; }); \
258 })
259
260 #define raw_spin_trylock_irqsave(lock, flags) \
261 ({ \
262 local_irq_save(flags); \
263 raw_spin_trylock(lock) ? \
264 1 : ({ local_irq_restore(flags); 0; }); \
265 })
266
267 /**
268 * raw_spin_can_lock - would raw_spin_trylock() succeed?
269 * @lock: the spinlock in question.
270 */
271 #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
272
273 /* Include rwlock functions */
274 #include <linux/rwlock.h>
275
276 /*
277 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
278 */
279 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
280 # include <linux/spinlock_api_smp.h>
281 #else
282 # include <linux/spinlock_api_up.h>
283 #endif
284
285 /*
286 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
287 */
288
289 static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
290 {
291 return &lock->rlock;
292 }
293
294 #define spin_lock_init(_lock) \
295 do { \
296 spinlock_check(_lock); \
297 raw_spin_lock_init(&(_lock)->rlock); \
298 } while (0)
299
300 static __always_inline void spin_lock(spinlock_t *lock)
301 {
302 raw_spin_lock(&lock->rlock);
303 }
304
305 static __always_inline void spin_lock_bh(spinlock_t *lock)
306 {
307 raw_spin_lock_bh(&lock->rlock);
308 }
309
310 static __always_inline int spin_trylock(spinlock_t *lock)
311 {
312 return raw_spin_trylock(&lock->rlock);
313 }
314
315 #define spin_lock_nested(lock, subclass) \
316 do { \
317 raw_spin_lock_nested(spinlock_check(lock), subclass); \
318 } while (0)
319
320 #define spin_lock_bh_nested(lock, subclass) \
321 do { \
322 raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
323 } while (0)
324
325 #define spin_lock_nest_lock(lock, nest_lock) \
326 do { \
327 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
328 } while (0)
329
330 static __always_inline void spin_lock_irq(spinlock_t *lock)
331 {
332 raw_spin_lock_irq(&lock->rlock);
333 }
334
335 #define spin_lock_irqsave(lock, flags) \
336 do { \
337 raw_spin_lock_irqsave(spinlock_check(lock), flags); \
338 } while (0)
339
340 #define spin_lock_irqsave_nested(lock, flags, subclass) \
341 do { \
342 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
343 } while (0)
344
345 static __always_inline void spin_unlock(spinlock_t *lock)
346 {
347 raw_spin_unlock(&lock->rlock);
348 }
349
350 static __always_inline void spin_unlock_bh(spinlock_t *lock)
351 {
352 raw_spin_unlock_bh(&lock->rlock);
353 }
354
355 static __always_inline void spin_unlock_irq(spinlock_t *lock)
356 {
357 raw_spin_unlock_irq(&lock->rlock);
358 }
359
360 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
361 {
362 raw_spin_unlock_irqrestore(&lock->rlock, flags);
363 }
364
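
A minimal sketch of the irqsave variant protecting data that is also touched from interrupt context; the counter and lock names are hypothetical:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(counter_lock);
static unsigned long counter;

static void counter_inc(void)
{
	unsigned long flags;

	spin_lock_irqsave(&counter_lock, flags);	/* also disables local interrupts */
	counter++;
	spin_unlock_irqrestore(&counter_lock, flags);
}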
365 static __always_inline int spin_trylock_bh(spinlock_t *lock)
366 {
367 return raw_spin_trylock_bh(&lock->rlock);
368 }
369
370 static __always_inline int spin_trylock_irq(spinlock_t *lock)
371 {
372 return raw_spin_trylock_irq(&lock->rlock);
373 }
374
375 #define spin_trylock_irqsave(lock, flags) \
376 ({ \
377 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
378 })
379
380 static __always_inline void spin_unlock_wait(spinlock_t *lock)
381 {
382 raw_spin_unlock_wait(&lock->rlock);
383 }
384
385 static __always_inline int spin_is_locked(spinlock_t *lock)
386 {
387 return raw_spin_is_locked(&lock->rlock);
388 }
389
390 static __always_inline int spin_is_contended(spinlock_t *lock)
391 {
392 return raw_spin_is_contended(&lock->rlock);
393 }
394
395 static __always_inline int spin_can_lock(spinlock_t *lock)
396 {
397 return raw_spin_can_lock(&lock->rlock);
398 }
399
400 #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
401
402 /*
403 * Pull the atomic_t declaration:
404 * (asm-mips/atomic.h needs above definitions)
405 */
406 #include <linux/atomic.h>
407 /**
408 * atomic_dec_and_lock - lock on reaching reference count zero
409 * @atomic: the atomic counter
410 * @lock: the spinlock in question
411 *
412 * Decrements @atomic by 1. If the result is 0, returns true and locks
413 * @lock. Returns false for all other cases.
414 */
415 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
416 #define atomic_dec_and_lock(atomic, lock) \
417 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
418
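
A sketch of the classic reference-count release pattern this helper enables, with hypothetical struct obj, obj_list and obj_lock:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
	atomic_t refcount;
	struct list_head link;
};

static LIST_HEAD(obj_list);
static DEFINE_SPINLOCK(obj_lock);

static void obj_put(struct obj *o)
{
	/* obj_lock is taken only when the count really drops to zero */
	if (atomic_dec_and_lock(&o->refcount, &obj_lock)) {
		list_del(&o->link);
		spin_unlock(&obj_lock);
		kfree(o);
	}
}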
419 #endif /* __LINUX_SPINLOCK_H */
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to understand whether there is an error in your driver.
The Error trace column contains the path on which the given rule is violated. You can expand or collapse entire entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and expand or collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking them opens the corresponding lines in the source code.
The Source code column contains the content of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows its full file name, and clicking it shows that file's content.
Kernel | Module | Rule | Verifier | Verdict | Status | Creation time | Problem description |
linux-4.8-rc1.tar.xz | drivers/usb/gadget/udc/mv_u3d_core.ko | 331_1a | CPAchecker | Bug | Fixed | 2016-10-29 02:14:15 | L0254 |
Comment
Reported: 29 Oct 2016
[Back to top]