 Bug
Error # 90
Error trace
The trace opens with the C type and structure declarations the verifier pulled into the driver's model: basic kernel typedefs (__u8, __u16, __u32, __u64, s8 through s64, size_t, loff_t, pid_t, time_t, and related __kernel_* aliases), followed by the struct definitions the driver's code reaches (among them pt_regs, desc_struct, thread_struct, cpumask, the lockdep/spinlock/rwlock and mutex types, timer_list, hrtimer, work_struct, dev_pm_info, mm_struct, page, vm_area_struct, task_struct, signal_struct, cred, kobject, kernfs_node, module, device, device_driver, class, pci_dev, and pci_bus). Each declaration is prefixed with its line number in the originating kernel header; the __anonstruct_* and __anonunion_* names are generated by the frontend for unnamed aggregates. In this excerpt the listing is flattened and cut off mid-declaration of struct pci_bus, and the error path itself (the steps leading to Error # 90) is not included.
resource busn_res;   struct pci_ops *ops;   struct msi_controller *msi;   void *sysdata;   struct proc_dir_entry *procdir;   unsigned char number;   unsigned char primary;   unsigned char max_bus_speed;   unsigned char cur_bus_speed;   char name[48U];   unsigned short bridge_ctl;   pci_bus_flags_t bus_flags;   struct device *bridge;   struct device dev;   struct bin_attribute *legacy_io;   struct bin_attribute *legacy_mem;   unsigned char is_added; } ;   562     struct pci_ops {   void * (*map_bus)(struct pci_bus *, unsigned int, int);   int (*read)(struct pci_bus *, unsigned int, int, int, u32 *);   int (*write)(struct pci_bus *, unsigned int, int, int, u32 ); } ;   584     struct pci_dynids {   spinlock_t lock;   struct list_head list; } ;   598     typedef unsigned int pci_ers_result_t;   608     struct pci_error_handlers {   pci_ers_result_t  (*error_detected)(struct pci_dev *, enum pci_channel_state );   pci_ers_result_t  (*mmio_enabled)(struct pci_dev *);   pci_ers_result_t  (*link_reset)(struct pci_dev *);   pci_ers_result_t  (*slot_reset)(struct pci_dev *);   void (*reset_notify)(struct pci_dev *, bool );   void (*resume)(struct pci_dev *); } ;   641     struct pci_driver {   struct list_head node;   const char *name;   const struct pci_device_id *id_table;   int (*probe)(struct pci_dev *, const struct pci_device_id *);   void (*remove)(struct pci_dev *);   int (*suspend)(struct pci_dev *, pm_message_t );   int (*suspend_late)(struct pci_dev *, pm_message_t );   int (*resume_early)(struct pci_dev *);   int (*resume)(struct pci_dev *);   void (*shutdown)(struct pci_dev *);   int (*sriov_configure)(struct pci_dev *, int);   const struct pci_error_handlers *err_handler;   struct device_driver driver;   struct pci_dynids dynids; } ;  1187     struct scatterlist {   unsigned long sg_magic;   unsigned long page_link;   unsigned int offset;   unsigned int length;   dma_addr_t dma_address;   unsigned int dma_length; } ;    19     struct dma_pool ;    93     struct shrink_control {   gfp_t gfp_mask;   unsigned long nr_to_scan;   int nid;   struct mem_cgroup *memcg; } ;    27     struct shrinker {   unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *);   unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *);   int seeks;   long batch;   unsigned long flags;   struct list_head list;   atomic_long_t *nr_deferred; } ;    64     struct file_ra_state ;    65     struct writeback_control ;   206     struct vm_fault {   unsigned int flags;   unsigned long pgoff;   void *virtual_address;   struct page *cow_page;   struct page *page;   unsigned long max_pgoff;   pte_t *pte; } ;   238     struct vm_operations_struct {   void (*open)(struct vm_area_struct *);   void (*close)(struct vm_area_struct *);   int (*fault)(struct vm_area_struct *, struct vm_fault *);   void (*map_pages)(struct vm_area_struct *, struct vm_fault *);   int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *);   int (*access)(struct vm_area_struct *, unsigned long, void *, int, int);   const char * (*name)(struct vm_area_struct *);   int (*set_policy)(struct vm_area_struct *, struct mempolicy *);   struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long);   struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;    34     struct dma_attrs {   unsigned long flags[1U]; } ;    70     enum dma_data_direction {   DMA_BIDIRECTIONAL = 0,   DMA_TO_DEVICE = 1,   DMA_FROM_DEVICE = 
2,   DMA_NONE = 3 } ;    77     struct sg_table {   struct scatterlist *sgl;   unsigned int nents;   unsigned int orig_nents; } ;   351     struct dma_map_ops {   void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , struct dma_attrs *);   void (*free)(struct device *, size_t , void *, dma_addr_t , struct dma_attrs *);   int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , struct dma_attrs *);   int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , struct dma_attrs *);   dma_addr_t  (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , struct dma_attrs *);   void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs *);   int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *);   void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *);   void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction );   void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction );   void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction );   int (*mapping_error)(struct device *, dma_addr_t );   int (*dma_supported)(struct device *, u64 );   int (*set_dma_mask)(struct device *, u64 );   int is_phys; } ;    54     enum irqreturn {   IRQ_NONE = 0,   IRQ_HANDLED = 1,   IRQ_WAKE_THREAD = 2 } ;    16     typedef enum irqreturn irqreturn_t;    62     struct exception_table_entry {   int insn;   int fixup; } ;   464     struct tasklet_struct {   struct tasklet_struct *next;   unsigned long state;   atomic_t count;   void (*func)(unsigned long);   unsigned long data; } ;    91     struct hlist_bl_node ;    91     struct hlist_bl_head {   struct hlist_bl_node *first; } ;    36     struct hlist_bl_node {   struct hlist_bl_node *next;   struct hlist_bl_node **pprev; } ;   114     struct __anonstruct____missing_field_name_213 {   spinlock_t lock;   int count; } ;   114     union __anonunion____missing_field_name_212 {   struct __anonstruct____missing_field_name_213 __annonCompField63; } ;   114     struct lockref {   union __anonunion____missing_field_name_212 __annonCompField64; } ;    50     struct vfsmount ;    51     struct __anonstruct____missing_field_name_215 {   u32 hash;   u32 len; } ;    51     union __anonunion____missing_field_name_214 {   struct __anonstruct____missing_field_name_215 __annonCompField65;   u64 hash_len; } ;    51     struct qstr {   union __anonunion____missing_field_name_214 __annonCompField66;   const unsigned char *name; } ;    90     struct dentry_operations ;    90     union __anonunion_d_u_216 {   struct hlist_node d_alias;   struct callback_head d_rcu; } ;    90     struct dentry {   unsigned int d_flags;   seqcount_t d_seq;   struct hlist_bl_node d_hash;   struct dentry *d_parent;   struct qstr d_name;   struct inode *d_inode;   unsigned char d_iname[32U];   struct lockref d_lockref;   const struct dentry_operations *d_op;   struct super_block *d_sb;   unsigned long d_time;   void *d_fsdata;   struct list_head d_lru;   struct list_head d_child;   struct list_head d_subdirs;   union __anonunion_d_u_216 d_u; } ;   142     struct dentry_operations {   int (*d_revalidate)(struct dentry *, unsigned int);   int (*d_weak_revalidate)(struct 
dentry *, unsigned int);   int (*d_hash)(const struct dentry *, struct qstr *);   int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *);   int (*d_delete)(const struct dentry *);   void (*d_release)(struct dentry *);   void (*d_prune)(struct dentry *);   void (*d_iput)(struct dentry *, struct inode *);   char * (*d_dname)(struct dentry *, char *, int);   struct vfsmount * (*d_automount)(struct path *);   int (*d_manage)(struct dentry *, bool ); } ;   578     struct path {   struct vfsmount *mnt;   struct dentry *dentry; } ;    27     struct list_lru_one {   struct list_head list;   long nr_items; } ;    32     struct list_lru_memcg {   struct list_lru_one *lru[0U]; } ;    37     struct list_lru_node {   spinlock_t lock;   struct list_lru_one lru;   struct list_lru_memcg *memcg_lrus; } ;    47     struct list_lru {   struct list_lru_node *node;   struct list_head list; } ;    58     struct __anonstruct____missing_field_name_220 {   struct radix_tree_node *parent;   void *private_data; } ;    58     union __anonunion____missing_field_name_219 {   struct __anonstruct____missing_field_name_220 __annonCompField67;   struct callback_head callback_head; } ;    58     struct radix_tree_node {   unsigned int path;   unsigned int count;   union __anonunion____missing_field_name_219 __annonCompField68;   struct list_head private_list;   void *slots[64U];   unsigned long tags[3U][1U]; } ;   105     struct radix_tree_root {   unsigned int height;   gfp_t gfp_mask;   struct radix_tree_node *rnode; } ;    45     struct fiemap_extent {   __u64 fe_logical;   __u64 fe_physical;   __u64 fe_length;   __u64 fe_reserved64[2U];   __u32 fe_flags;   __u32 fe_reserved[3U]; } ;    38     enum migrate_mode {   MIGRATE_ASYNC = 0,   MIGRATE_SYNC_LIGHT = 1,   MIGRATE_SYNC = 2 } ;    30     struct block_device ;    60     struct export_operations ;    63     struct nameidata ;    64     struct kiocb ;    65     struct poll_table_struct ;    66     struct kstatfs ;    67     struct swap_info_struct ;    68     struct iov_iter ;    72     struct iattr {   unsigned int ia_valid;   umode_t ia_mode;   kuid_t ia_uid;   kgid_t ia_gid;   loff_t ia_size;   struct timespec ia_atime;   struct timespec ia_mtime;   struct timespec ia_ctime;   struct file *ia_file; } ;   212     struct dquot ;    19     typedef __kernel_uid32_t projid_t;    23     struct __anonstruct_kprojid_t_222 {   projid_t val; } ;    23     typedef struct __anonstruct_kprojid_t_222 kprojid_t;   166     enum quota_type {   USRQUOTA = 0,   GRPQUOTA = 1,   PRJQUOTA = 2 } ;    66     typedef long long qsize_t;    67     union __anonunion____missing_field_name_223 {   kuid_t uid;   kgid_t gid;   kprojid_t projid; } ;    67     struct kqid {   union __anonunion____missing_field_name_223 __annonCompField70;   enum quota_type type; } ;   184     struct mem_dqblk {   qsize_t dqb_bhardlimit;   qsize_t dqb_bsoftlimit;   qsize_t dqb_curspace;   qsize_t dqb_rsvspace;   qsize_t dqb_ihardlimit;   qsize_t dqb_isoftlimit;   qsize_t dqb_curinodes;   time_t dqb_btime;   time_t dqb_itime; } ;   206     struct quota_format_type ;   207     struct mem_dqinfo {   struct quota_format_type *dqi_format;   int dqi_fmt_id;   struct list_head dqi_dirty_list;   unsigned long dqi_flags;   unsigned int dqi_bgrace;   unsigned int dqi_igrace;   qsize_t dqi_max_spc_limit;   qsize_t dqi_max_ino_limit;   void *dqi_priv; } ;   272     struct dquot {   struct hlist_node dq_hash;   struct list_head dq_inuse;   struct list_head dq_free;   struct 
list_head dq_dirty;   struct mutex dq_lock;   atomic_t dq_count;   wait_queue_head_t dq_wait_unused;   struct super_block *dq_sb;   struct kqid dq_id;   loff_t dq_off;   unsigned long dq_flags;   struct mem_dqblk dq_dqb; } ;   299     struct quota_format_ops {   int (*check_quota_file)(struct super_block *, int);   int (*read_file_info)(struct super_block *, int);   int (*write_file_info)(struct super_block *, int);   int (*free_file_info)(struct super_block *, int);   int (*read_dqblk)(struct dquot *);   int (*commit_dqblk)(struct dquot *);   int (*release_dqblk)(struct dquot *); } ;   310     struct dquot_operations {   int (*write_dquot)(struct dquot *);   struct dquot * (*alloc_dquot)(struct super_block *, int);   void (*destroy_dquot)(struct dquot *);   int (*acquire_dquot)(struct dquot *);   int (*release_dquot)(struct dquot *);   int (*mark_dirty)(struct dquot *);   int (*write_info)(struct super_block *, int);   qsize_t * (*get_reserved_space)(struct inode *);   int (*get_projid)(struct inode *, kprojid_t *); } ;   325     struct qc_dqblk {   int d_fieldmask;   u64 d_spc_hardlimit;   u64 d_spc_softlimit;   u64 d_ino_hardlimit;   u64 d_ino_softlimit;   u64 d_space;   u64 d_ino_count;   s64 d_ino_timer;   s64 d_spc_timer;   int d_ino_warns;   int d_spc_warns;   u64 d_rt_spc_hardlimit;   u64 d_rt_spc_softlimit;   u64 d_rt_space;   s64 d_rt_spc_timer;   int d_rt_spc_warns; } ;   348     struct qc_type_state {   unsigned int flags;   unsigned int spc_timelimit;   unsigned int ino_timelimit;   unsigned int rt_spc_timelimit;   unsigned int spc_warnlimit;   unsigned int ino_warnlimit;   unsigned int rt_spc_warnlimit;   unsigned long long ino;   blkcnt_t blocks;   blkcnt_t nextents; } ;   394     struct qc_state {   unsigned int s_incoredqs;   struct qc_type_state s_state[3U]; } ;   405     struct qc_info {   int i_fieldmask;   unsigned int i_flags;   unsigned int i_spc_timelimit;   unsigned int i_ino_timelimit;   unsigned int i_rt_spc_timelimit;   unsigned int i_spc_warnlimit;   unsigned int i_ino_warnlimit;   unsigned int i_rt_spc_warnlimit; } ;   418     struct quotactl_ops {   int (*quota_on)(struct super_block *, int, int, struct path *);   int (*quota_off)(struct super_block *, int);   int (*quota_enable)(struct super_block *, unsigned int);   int (*quota_disable)(struct super_block *, unsigned int);   int (*quota_sync)(struct super_block *, int);   int (*set_info)(struct super_block *, int, struct qc_info *);   int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *);   int (*get_state)(struct super_block *, struct qc_state *);   int (*rm_xquota)(struct super_block *, unsigned int); } ;   432     struct quota_format_type {   int qf_fmt_id;   const struct quota_format_ops *qf_ops;   struct module *qf_owner;   struct quota_format_type *qf_next; } ;   496     struct quota_info {   unsigned int flags;   struct mutex dqio_mutex;   struct mutex dqonoff_mutex;   struct inode *files[3U];   struct mem_dqinfo info[3U];   const struct quota_format_ops *ops[3U]; } ;   526     struct kiocb {   struct file *ki_filp;   loff_t ki_pos;   void (*ki_complete)(struct kiocb *, long, long);   void *private;   int ki_flags; } ;   364     struct address_space_operations {   int (*writepage)(struct page *, struct writeback_control *);   int (*readpage)(struct file *, struct page *);   int (*writepages)(struct address_space *, struct writeback_control *);   int (*set_page_dirty)(struct page *);   int (*readpages)(struct 
file *, struct address_space *, struct list_head *, unsigned int);   int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **);   int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *);   sector_t  (*bmap)(struct address_space *, sector_t );   void (*invalidatepage)(struct page *, unsigned int, unsigned int);   int (*releasepage)(struct page *, gfp_t );   void (*freepage)(struct page *);   ssize_t  (*direct_IO)(struct kiocb *, struct iov_iter *, loff_t );   int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode );   int (*launder_page)(struct page *);   int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);   void (*is_dirty_writeback)(struct page *, bool *, bool *);   int (*error_remove_page)(struct address_space *, struct page *);   int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *);   void (*swap_deactivate)(struct file *); } ;   421     struct address_space {   struct inode *host;   struct radix_tree_root page_tree;   spinlock_t tree_lock;   atomic_t i_mmap_writable;   struct rb_root i_mmap;   struct rw_semaphore i_mmap_rwsem;   unsigned long nrpages;   unsigned long nrshadows;   unsigned long writeback_index;   const struct address_space_operations *a_ops;   unsigned long flags;   spinlock_t private_lock;   struct list_head private_list;   void *private_data; } ;   441     struct request_queue ;   442     struct hd_struct ;   442     struct gendisk ;   442     struct block_device {   dev_t bd_dev;   int bd_openers;   struct inode *bd_inode;   struct super_block *bd_super;   struct mutex bd_mutex;   struct list_head bd_inodes;   void *bd_claiming;   void *bd_holder;   int bd_holders;   bool bd_write_holder;   struct list_head bd_holder_disks;   struct block_device *bd_contains;   unsigned int bd_block_size;   struct hd_struct *bd_part;   unsigned int bd_part_count;   int bd_invalidated;   struct gendisk *bd_disk;   struct request_queue *bd_queue;   struct list_head bd_list;   unsigned long bd_private;   int bd_fsfreeze_count;   struct mutex bd_fsfreeze_mutex; } ;   558     struct posix_acl ;   559     struct inode_operations ;   559     union __anonunion____missing_field_name_226 {   const unsigned int i_nlink;   unsigned int __i_nlink; } ;   559     union __anonunion____missing_field_name_227 {   struct hlist_head i_dentry;   struct callback_head i_rcu; } ;   559     struct file_lock_context ;   559     struct cdev ;   559     union __anonunion____missing_field_name_228 {   struct pipe_inode_info *i_pipe;   struct block_device *i_bdev;   struct cdev *i_cdev; } ;   559     struct inode {   umode_t i_mode;   unsigned short i_opflags;   kuid_t i_uid;   kgid_t i_gid;   unsigned int i_flags;   struct posix_acl *i_acl;   struct posix_acl *i_default_acl;   const struct inode_operations *i_op;   struct super_block *i_sb;   struct address_space *i_mapping;   void *i_security;   unsigned long i_ino;   union __anonunion____missing_field_name_226 __annonCompField71;   dev_t i_rdev;   loff_t i_size;   struct timespec i_atime;   struct timespec i_mtime;   struct timespec i_ctime;   spinlock_t i_lock;   unsigned short i_bytes;   unsigned int i_blkbits;   blkcnt_t i_blocks;   unsigned long i_state;   struct mutex i_mutex;   unsigned long dirtied_when;   unsigned long dirtied_time_when;   struct hlist_node i_hash;   struct list_head i_wb_list;   struct list_head i_lru;   struct list_head i_sb_list;   
union __anonunion____missing_field_name_227 __annonCompField72;   u64 i_version;   atomic_t i_count;   atomic_t i_dio_count;   atomic_t i_writecount;   atomic_t i_readcount;   const struct file_operations *i_fop;   struct file_lock_context *i_flctx;   struct address_space i_data;   struct list_head i_devices;   union __anonunion____missing_field_name_228 __annonCompField73;   __u32 i_generation;   __u32 i_fsnotify_mask;   struct hlist_head i_fsnotify_marks;   void *i_private; } ;   796     struct fown_struct {   rwlock_t lock;   struct pid *pid;   enum pid_type pid_type;   kuid_t uid;   kuid_t euid;   int signum; } ;   804     struct file_ra_state {   unsigned long start;   unsigned int size;   unsigned int async_size;   unsigned int ra_pages;   unsigned int mmap_miss;   loff_t prev_pos; } ;   827     union __anonunion_f_u_229 {   struct llist_node fu_llist;   struct callback_head fu_rcuhead; } ;   827     struct file {   union __anonunion_f_u_229 f_u;   struct path f_path;   struct inode *f_inode;   const struct file_operations *f_op;   spinlock_t f_lock;   atomic_long_t f_count;   unsigned int f_flags;   fmode_t f_mode;   struct mutex f_pos_lock;   loff_t f_pos;   struct fown_struct f_owner;   const struct cred *f_cred;   struct file_ra_state f_ra;   u64 f_version;   void *f_security;   void *private_data;   struct list_head f_ep_links;   struct list_head f_tfile_llink;   struct address_space *f_mapping; } ;   912     typedef void *fl_owner_t;   913     struct file_lock ;   914     struct file_lock_operations {   void (*fl_copy_lock)(struct file_lock *, struct file_lock *);   void (*fl_release_private)(struct file_lock *); } ;   920     struct lock_manager_operations {   int (*lm_compare_owner)(struct file_lock *, struct file_lock *);   unsigned long int (*lm_owner_key)(struct file_lock *);   fl_owner_t  (*lm_get_owner)(fl_owner_t );   void (*lm_put_owner)(fl_owner_t );   void (*lm_notify)(struct file_lock *);   int (*lm_grant)(struct file_lock *, int);   bool  (*lm_break)(struct file_lock *);   int (*lm_change)(struct file_lock *, int, struct list_head *);   void (*lm_setup)(struct file_lock *, void **); } ;   941     struct nlm_lockowner ;   942     struct nfs_lock_info {   u32 state;   struct nlm_lockowner *owner;   struct list_head list; } ;    14     struct nfs4_lock_state ;    15     struct nfs4_lock_info {   struct nfs4_lock_state *owner; } ;    19     struct fasync_struct ;    19     struct __anonstruct_afs_231 {   struct list_head link;   int state; } ;    19     union __anonunion_fl_u_230 {   struct nfs_lock_info nfs_fl;   struct nfs4_lock_info nfs4_fl;   struct __anonstruct_afs_231 afs; } ;    19     struct file_lock {   struct file_lock *fl_next;   struct list_head fl_list;   struct hlist_node fl_link;   struct list_head fl_block;   fl_owner_t fl_owner;   unsigned int fl_flags;   unsigned char fl_type;   unsigned int fl_pid;   int fl_link_cpu;   struct pid *fl_nspid;   wait_queue_head_t fl_wait;   struct file *fl_file;   loff_t fl_start;   loff_t fl_end;   struct fasync_struct *fl_fasync;   unsigned long fl_break_time;   unsigned long fl_downgrade_time;   const struct file_lock_operations *fl_ops;   const struct lock_manager_operations *fl_lmops;   union __anonunion_fl_u_230 fl_u; } ;   994     struct file_lock_context {   spinlock_t flc_lock;   struct list_head flc_flock;   struct list_head flc_posix;   struct list_head flc_lease; } ;  1052     struct fasync_struct {   spinlock_t fa_lock;   int magic;   int fa_fd;   struct fasync_struct *fa_next;   struct file *fa_file;   
struct callback_head fa_rcu; } ;  1230     struct sb_writers {   struct percpu_counter counter[3U];   wait_queue_head_t wait;   int frozen;   wait_queue_head_t wait_unfrozen;   struct lockdep_map lock_map[3U]; } ;  1259     struct super_operations ;  1259     struct xattr_handler ;  1259     struct mtd_info ;  1259     struct super_block {   struct list_head s_list;   dev_t s_dev;   unsigned char s_blocksize_bits;   unsigned long s_blocksize;   loff_t s_maxbytes;   struct file_system_type *s_type;   const struct super_operations *s_op;   const struct dquot_operations *dq_op;   const struct quotactl_ops *s_qcop;   const struct export_operations *s_export_op;   unsigned long s_flags;   unsigned long s_magic;   struct dentry *s_root;   struct rw_semaphore s_umount;   int s_count;   atomic_t s_active;   void *s_security;   const struct xattr_handler **s_xattr;   struct list_head s_inodes;   struct hlist_bl_head s_anon;   struct list_head s_mounts;   struct block_device *s_bdev;   struct backing_dev_info *s_bdi;   struct mtd_info *s_mtd;   struct hlist_node s_instances;   unsigned int s_quota_types;   struct quota_info s_dquot;   struct sb_writers s_writers;   char s_id[32U];   u8 s_uuid[16U];   void *s_fs_info;   unsigned int s_max_links;   fmode_t s_mode;   u32 s_time_gran;   struct mutex s_vfs_rename_mutex;   char *s_subtype;   char *s_options;   const struct dentry_operations *s_d_op;   int cleancache_poolid;   struct shrinker s_shrink;   atomic_long_t s_remove_count;   int s_readonly_remount;   struct workqueue_struct *s_dio_done_wq;   struct hlist_head s_pins;   struct list_lru s_dentry_lru;   struct list_lru s_inode_lru;   struct callback_head rcu;   int s_stack_depth; } ;  1497     struct fiemap_extent_info {   unsigned int fi_flags;   unsigned int fi_extents_mapped;   unsigned int fi_extents_max;   struct fiemap_extent *fi_extents_start; } ;  1511     struct dir_context ;  1536     struct dir_context {   int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int);   loff_t pos; } ;  1543     struct file_operations {   struct module *owner;   loff_t  (*llseek)(struct file *, loff_t , int);   ssize_t  (*read)(struct file *, char *, size_t , loff_t *);   ssize_t  (*write)(struct file *, const char *, size_t , loff_t *);   ssize_t  (*read_iter)(struct kiocb *, struct iov_iter *);   ssize_t  (*write_iter)(struct kiocb *, struct iov_iter *);   int (*iterate)(struct file *, struct dir_context *);   unsigned int (*poll)(struct file *, struct poll_table_struct *);   long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);   long int (*compat_ioctl)(struct file *, unsigned int, unsigned long);   int (*mmap)(struct file *, struct vm_area_struct *);   int (*mremap)(struct file *, struct vm_area_struct *);   int (*open)(struct inode *, struct file *);   int (*flush)(struct file *, fl_owner_t );   int (*release)(struct inode *, struct file *);   int (*fsync)(struct file *, loff_t , loff_t , int);   int (*aio_fsync)(struct kiocb *, int);   int (*fasync)(int, struct file *, int);   int (*lock)(struct file *, int, struct file_lock *);   ssize_t  (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int);   unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);   int (*check_flags)(int);   int (*flock)(struct file *, int, struct file_lock *);   ssize_t  (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int);   ssize_t  (*splice_read)(struct file *, loff_t *, 
struct pipe_inode_info *, size_t , unsigned int);   int (*setlease)(struct file *, long, struct file_lock **, void **);   long int (*fallocate)(struct file *, int, loff_t , loff_t );   void (*show_fdinfo)(struct seq_file *, struct file *); } ;  1604     struct inode_operations {   struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int);   void * (*follow_link)(struct dentry *, struct nameidata *);   int (*permission)(struct inode *, int);   struct posix_acl * (*get_acl)(struct inode *, int);   int (*readlink)(struct dentry *, char *, int);   void (*put_link)(struct dentry *, struct nameidata *, void *);   int (*create)(struct inode *, struct dentry *, umode_t , bool );   int (*link)(struct dentry *, struct inode *, struct dentry *);   int (*unlink)(struct inode *, struct dentry *);   int (*symlink)(struct inode *, struct dentry *, const char *);   int (*mkdir)(struct inode *, struct dentry *, umode_t );   int (*rmdir)(struct inode *, struct dentry *);   int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t );   int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *);   int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int);   int (*setattr)(struct dentry *, struct iattr *);   int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *);   int (*setxattr)(struct dentry *, const char *, const void *, size_t , int);   ssize_t  (*getxattr)(struct dentry *, const char *, void *, size_t );   ssize_t  (*listxattr)(struct dentry *, char *, size_t );   int (*removexattr)(struct dentry *, const char *);   int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 );   int (*update_time)(struct inode *, struct timespec *, int);   int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *);   int (*tmpfile)(struct inode *, struct dentry *, umode_t );   int (*set_acl)(struct inode *, struct posix_acl *, int);   int (*dentry_open)(struct dentry *, struct file *, const struct cred *); } ;  1659     struct super_operations {   struct inode * (*alloc_inode)(struct super_block *);   void (*destroy_inode)(struct inode *);   void (*dirty_inode)(struct inode *, int);   int (*write_inode)(struct inode *, struct writeback_control *);   int (*drop_inode)(struct inode *);   void (*evict_inode)(struct inode *);   void (*put_super)(struct super_block *);   int (*sync_fs)(struct super_block *, int);   int (*freeze_super)(struct super_block *);   int (*freeze_fs)(struct super_block *);   int (*thaw_super)(struct super_block *);   int (*unfreeze_fs)(struct super_block *);   int (*statfs)(struct dentry *, struct kstatfs *);   int (*remount_fs)(struct super_block *, int *, char *);   void (*umount_begin)(struct super_block *);   int (*show_options)(struct seq_file *, struct dentry *);   int (*show_devname)(struct seq_file *, struct dentry *);   int (*show_path)(struct seq_file *, struct dentry *);   int (*show_stats)(struct seq_file *, struct dentry *);   ssize_t  (*quota_read)(struct super_block *, int, char *, size_t , loff_t );   ssize_t  (*quota_write)(struct super_block *, int, const char *, size_t , loff_t );   struct dquot ** (*get_dquots)(struct inode *);   int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t );   long int (*nr_cached_objects)(struct super_block *, struct shrink_control *);   long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;  1891     struct file_system_type {   const char *name;   int fs_flags;   struct 
dentry * (*mount)(struct file_system_type *, int, const char *, void *);   void (*kill_sb)(struct super_block *);   struct module *owner;   struct file_system_type *next;   struct hlist_head fs_supers;   struct lock_class_key s_lock_key;   struct lock_class_key s_umount_key;   struct lock_class_key s_vfs_rename_key;   struct lock_class_key s_writers_key[3U];   struct lock_class_key i_lock_key;   struct lock_class_key i_mutex_key;   struct lock_class_key i_mutex_dir_key; } ;     9     struct usb_ctrlrequest {   __u8 bRequestType;   __u8 bRequest;   __le16 wValue;   __le16 wIndex;   __le16 wLength; } ;   363     struct usb_endpoint_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bEndpointAddress;   __u8 bmAttributes;   __le16 wMaxPacketSize;   __u8 bInterval;   __u8 bRefresh;   __u8 bSynchAddress; } ;   613     struct usb_ss_ep_comp_descriptor {   __u8 bLength;   __u8 bDescriptorType;   __u8 bMaxBurst;   __u8 bmAttributes;   __le16 wBytesPerInterval; } ;   905     enum usb_device_speed {   USB_SPEED_UNKNOWN = 0,   USB_SPEED_LOW = 1,   USB_SPEED_FULL = 2,   USB_SPEED_HIGH = 3,   USB_SPEED_WIRELESS = 4,   USB_SPEED_SUPER = 5 } ;   914     enum usb_device_state {   USB_STATE_NOTATTACHED = 0,   USB_STATE_ATTACHED = 1,   USB_STATE_POWERED = 2,   USB_STATE_RECONNECTING = 3,   USB_STATE_UNAUTHENTICATED = 4,   USB_STATE_DEFAULT = 5,   USB_STATE_ADDRESS = 6,   USB_STATE_CONFIGURED = 7,   USB_STATE_SUSPENDED = 8 } ;    54     struct usb_ep ;    55     struct usb_request {   void *buf;   unsigned int length;   dma_addr_t dma;   struct scatterlist *sg;   unsigned int num_sgs;   unsigned int num_mapped_sgs;   unsigned short stream_id;   unsigned char no_interrupt;   unsigned char zero;   unsigned char short_not_ok;   void (*complete)(struct usb_ep *, struct usb_request *);   void *context;   struct list_head list;   int status;   unsigned int actual; } ;   113     struct usb_ep_ops {   int (*enable)(struct usb_ep *, const struct usb_endpoint_descriptor *);   int (*disable)(struct usb_ep *);   struct usb_request * (*alloc_request)(struct usb_ep *, gfp_t );   void (*free_request)(struct usb_ep *, struct usb_request *);   int (*queue)(struct usb_ep *, struct usb_request *, gfp_t );   int (*dequeue)(struct usb_ep *, struct usb_request *);   int (*set_halt)(struct usb_ep *, int);   int (*set_wedge)(struct usb_ep *);   int (*fifo_status)(struct usb_ep *);   void (*fifo_flush)(struct usb_ep *); } ;   142     struct usb_ep {   void *driver_data;   const char *name;   const struct usb_ep_ops *ops;   struct list_head ep_list;   unsigned short maxpacket;   unsigned short maxpacket_limit;   unsigned short max_streams;   unsigned char mult;   unsigned char maxburst;   u8 address;   const struct usb_endpoint_descriptor *desc;   const struct usb_ss_ep_comp_descriptor *comp_desc; } ;   463     struct usb_dcd_config_params {   __u8 bU1devExitLat;   __le16 bU2DevExitLat; } ;   472     struct usb_gadget ;   473     struct usb_gadget_driver ;   474     struct usb_udc ;   475     struct usb_gadget_ops {   int (*get_frame)(struct usb_gadget *);   int (*wakeup)(struct usb_gadget *);   int (*set_selfpowered)(struct usb_gadget *, int);   int (*vbus_session)(struct usb_gadget *, int);   int (*vbus_draw)(struct usb_gadget *, unsigned int);   int (*pullup)(struct usb_gadget *, int);   int (*ioctl)(struct usb_gadget *, unsigned int, unsigned long);   void (*get_config_params)(struct usb_dcd_config_params *);   int (*udc_start)(struct usb_gadget *, struct usb_gadget_driver *);   int (*udc_stop)(struct usb_gadget *); } ;   
496     struct usb_gadget {   struct work_struct work;   struct usb_udc *udc;   const struct usb_gadget_ops *ops;   struct usb_ep *ep0;   struct list_head ep_list;   enum usb_device_speed speed;   enum usb_device_speed max_speed;   enum usb_device_state state;   const char *name;   struct device dev;   unsigned int out_epnum;   unsigned int in_epnum;   unsigned char sg_supported;   unsigned char is_otg;   unsigned char is_a_peripheral;   unsigned char b_hnp_enable;   unsigned char a_hnp_support;   unsigned char a_alt_hnp_support;   unsigned char quirk_ep_out_aligned_size;   unsigned char is_selfpowered; } ;   800     struct usb_gadget_driver {   char *function;   enum usb_device_speed max_speed;   int (*bind)(struct usb_gadget *, struct usb_gadget_driver *);   void (*unbind)(struct usb_gadget *);   int (*setup)(struct usb_gadget *, const struct usb_ctrlrequest *);   void (*disconnect)(struct usb_gadget *);   void (*suspend)(struct usb_gadget *);   void (*resume)(struct usb_gadget *);   void (*reset)(struct usb_gadget *);   struct device_driver driver; } ;  1053     struct udc_csrs {   u32 sca;   u32 ne[9U]; } ;   389     struct udc_regs {   u32 cfg;   u32 ctl;   u32 sts;   u32 irqsts;   u32 irqmsk;   u32 ep_irqsts;   u32 ep_irqmsk; } ;   414     struct udc_ep_regs {   u32 ctl;   u32 sts;   u32 bufin_framenum;   u32 bufout_maxpkt;   u32 subptr;   u32 desptr;   u32 reserved;   u32 confirm; } ;   442     struct udc_stp_dma {   u32 status;   u32 _reserved;   u32 data12;   u32 data34; } ;   455     struct udc_data_dma {   u32 status;   u32 _reserved;   u32 bufptr;   u32 next; } ;   467     struct udc_request {   struct usb_request req;   unsigned char dma_going;   unsigned char dma_done;   dma_addr_t td_phys;   struct udc_data_dma *td_data;   struct udc_data_dma *td_data_last;   struct list_head queue;   unsigned int chain_len; } ;   487     struct udc ;   487     struct udc_ep {   struct usb_ep ep;   struct udc_ep_regs *regs;   u32 *txfifo;   u32 *dma;   dma_addr_t td_phys;   dma_addr_t td_stp_dma;   struct udc_stp_dma *td_stp;   struct udc_data_dma *td;   struct udc_request *req;   unsigned int req_used;   unsigned int req_completed;   struct udc_request *bna_dummy_req;   unsigned int bna_occurred;   unsigned int naking;   struct udc *dev;   struct list_head queue;   unsigned int halted;   unsigned int cancel_transfer;   unsigned char num;   unsigned short fifo_depth;   unsigned char in; } ;   520     struct udc {   struct usb_gadget gadget;   spinlock_t lock;   struct udc_ep ep[32U];   struct usb_gadget_driver *driver;   unsigned char active;   unsigned char stall_ep0in;   unsigned char waiting_zlp_ack_ep0in;   unsigned char set_cfg_not_acked;   unsigned char irq_registered;   unsigned char data_ep_enabled;   unsigned char data_ep_queued;   unsigned char mem_region;   unsigned char sys_suspended;   unsigned int connected;   u16 chiprev;   struct pci_dev *pdev;   struct udc_csrs *csr;   struct udc_regs *regs;   struct udc_ep_regs *ep_regs;   u32 *rxfifo;   u32 *txfifo;   struct dma_pool *data_requests;   struct dma_pool *stp_requests;   unsigned long phys_addr;   void *virt_addr;   unsigned int irq;   u16 cur_config;   u16 cur_intf;   u16 cur_alt; } ;   564     union udc_setup_data {   u32 data[2U];   struct usb_ctrlrequest request; } ;  3021     struct amd5536udc ;     1     void __builtin_prefetch(const void *, ...);     1     long int __builtin_expect(long exp, long c);    30     void * __memcpy(void *, const void *, size_t );    57     void * __memset(void *, int, size_t );    65     char 
* strcpy(char *, const char *);   204     int test_and_set_bit(long nr, volatile unsigned long *addr);    56     unsigned int readl(const volatile void *addr);    62     void writeb(unsigned char val, volatile void *addr);    64     void writel(unsigned int val, volatile void *addr);   134     void * phys_to_virt(phys_addr_t address);   182     void * ldv_ioremap_nocache_1(resource_size_t ldv_func_arg1, unsigned long ldv_func_arg2);   186     void * ldv_ioremap_nocache_3(resource_size_t ldv_func_arg1, unsigned long ldv_func_arg2);   203     void ldv_iounmap_2(volatile void *addr);    45     void __dynamic_pr_debug(struct _ddebug *, const char *, ...);    53     void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...);   404     int snprintf(char *, size_t , const char *, ...);    25     void INIT_LIST_HEAD(struct list_head *list);    48     void __list_add(struct list_head *, struct list_head *, struct list_head *);    75     void list_add_tail(struct list_head *new, struct list_head *head);   112     void __list_del_entry(struct list_head *);   143     void list_del_init(struct list_head *entry);   187     int list_empty(const struct list_head *head);    93     void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);    22     void _raw_spin_lock(raw_spinlock_t *);    31     void _raw_spin_lock_irq(raw_spinlock_t *);    34     unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *);    41     void _raw_spin_unlock(raw_spinlock_t *);    43     void _raw_spin_unlock_irq(raw_spinlock_t *);    45     void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);   299     raw_spinlock_t * spinlock_check(spinlock_t *lock);   310     void spin_lock(spinlock_t *lock);   340     void spin_lock_irq(spinlock_t *lock);   355     void spin_unlock(spinlock_t *lock);   365     void spin_unlock_irq(spinlock_t *lock);   370     void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);    10     void ldv_error();    26     void * ldv_undef_ptr();     7     int LDV_IO_MEMS = 0;    11     void * ldv_io_mem_remap(void *addr);    23     void ldv_io_mem_unmap(const volatile void *addr);    29     void ldv_check_final_state();    91     void wait_for_completion(struct completion *);   106     void complete(struct completion *);    77     extern volatile unsigned long jiffies;    94     void init_timer_key(struct timer_list *, unsigned int, const char *, struct lock_class_key *);   169     int timer_pending(const struct timer_list *timer);   176     int mod_timer(struct timer_list *, unsigned long);   240     void add_timer(struct timer_list *);   245     int del_timer_sync(struct timer_list *);   139     extern struct resource iomem_resource;   192     struct resource * __request_region(struct resource *, resource_size_t , resource_size_t , const char *, int);   201     void __release_region(struct resource *, resource_size_t , resource_size_t );   812     int dev_set_name(struct device *, const char *, ...);   833     void * dev_get_drvdata(const struct device *dev);   838     void dev_set_drvdata(struct device *dev, void *data);  1053     void dev_err(const struct device *, const char *, ...);  1059     void _dev_info(const struct device *, const char *, ...);   143     void kfree(const void *);   289     void * __kmalloc(size_t , gfp_t );   418     void * kmalloc(size_t size, gfp_t flags);   581     void * kzalloc(size_t size, gfp_t flags);   944     int pci_enable_device(struct pci_dev *);   961     void pci_disable_device(struct pci_dev 
*);   964     void pci_set_master(struct pci_dev *);   971     int pci_try_set_mwi(struct pci_dev *);    19     struct dma_pool * dma_pool_create(const char *, struct device *, size_t , size_t , size_t );    22     void dma_pool_destroy(struct dma_pool *);    24     void * dma_pool_alloc(struct dma_pool *, gfp_t , dma_addr_t *);    27     void dma_pool_free(struct dma_pool *, void *, dma_addr_t );  1479     void * pci_get_drvdata(struct pci_dev *pdev);  1484     void pci_set_drvdata(struct pci_dev *pdev, void *data);   127     int request_threaded_irq(unsigned int, irqreturn_t  (*)(int, void *), irqreturn_t  (*)(int, void *), unsigned long, const char *, void *);   132     int request_irq(unsigned int irq, irqreturn_t  (*handler)(int, void *), unsigned long flags, const char *name___0, void *dev);   146     void free_irq(unsigned int, void *);   528     void __tasklet_schedule(struct tasklet_struct *);   530     void tasklet_schedule(struct tasklet_struct *t);   603     int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd);    44     const char * usb_speed_string(enum usb_device_speed );   196     void usb_ep_set_maxpacket_limit(struct usb_ep *ep, unsigned int maxpacket_limit);   928     int usb_add_gadget_udc_release(struct device *, struct usb_gadget *, void (*)(struct device *));   931     void usb_del_gadget_udc(struct usb_gadget *);  1009     int usb_gadget_map_request(struct usb_gadget *, struct usb_request *, int);  1012     void usb_gadget_unmap_request(struct usb_gadget *, struct usb_request *, int);  1025     void usb_gadget_udc_reset(struct usb_gadget *, struct usb_gadget_driver *);  1032     void usb_gadget_giveback_request(struct usb_ep *, struct usb_request *);    67     void udc_tasklet_disconnect(unsigned long par);    68     void empty_req_queue(struct udc_ep *ep);    69     int udc_probe(struct udc *dev);    70     void udc_basic_init(struct udc *dev);    71     void udc_setup_endpoints(struct udc *dev);    72     void udc_soft_reset(struct udc *dev);    73     struct udc_request * udc_alloc_bna_dummy(struct udc_ep *ep);    74     void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);    75     int udc_free_dma_chain(struct udc *dev, struct udc_request *req);    76     int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req, unsigned long buf_len, gfp_t gfp_flags);    78     int udc_remote_wakeup(struct udc *dev);    79     int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);    80     void udc_pci_remove(struct pci_dev *pdev);    83     const char mod_desc[37U] = { 'A', 'M', 'D', ' ', '5', '5', '3', '6', ' ', 'U', 'D', 'C', ' ', '-', ' ', 'U', 'S', 'B', ' ', 'D', 'e', 'v', 'i', 'c', 'e', ' ', 'C', 'o', 'n', 't', 'r', 'o', 'l', 'l', 'e', 'r', '\x0' };    84     const char name[11U] = { 'a', 'm', 'd', '5', '5', '3', '6', 'u', 'd', 'c', '\x0' };    87     const struct usb_ep_ops udc_ep_ops;    90     union udc_setup_data setup_data = {  };    93     struct udc *udc = 0;    96     struct spinlock udc_irq_spinlock = { { { { { 0U } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "udc_irq_spinlock", 0, 0UL } } } };    98     struct spinlock udc_stall_spinlock = { { { { { 0U } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "udc_stall_spinlock", 0, 0UL } } } };   104     unsigned int udc_rxfifo_pending = 0U;   107     int soft_reset_occured = 0;   108     int soft_reset_after_usbreset_occured = 0;   111     struct timer_list udc_timer = {  };   112     int stop_timer = 0;   128     int set_rde = -1;   
130     struct completion on_exit = { 0U, { { { { { { 0U } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "(on_exit).wait.lock", 0, 0UL } } } }, { &(on_exit.wait.task_list), &(on_exit.wait.task_list) } } };   131     struct timer_list udc_pollstall_timer = {  };   132     int stop_pollstall_timer = 0;   133     struct completion on_pollstall_exit = { 0U, { { { { { { 0U } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "(on_pollstall_exit).wait.lock", 0, 0UL } } } }, { &(on_pollstall_exit.wait.task_list), &(on_pollstall_exit.wait.task_list) } } };   136     struct tasklet_struct disconnect_tasklet = { (struct tasklet_struct *)0, 0UL, { 0 }, &udc_tasklet_disconnect, (unsigned long)(&udc) };   141     const char ep0_string[6U] = { 'e', 'p', '0', 'i', 'n', '\x0' };   142     const const char *ep_string[32U] = { (const char *)(&ep0_string), "ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk", "ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk", "ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk", "ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk", "ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk", "ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk", "ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk" };   154     _Bool use_dma = 1;   156     _Bool use_dma_ppb = 1;   158     _Bool use_dma_ppb_du = 0;   160     int use_dma_bufferfill_mode = 0;   162     _Bool use_fullspeed = 0;   164     unsigned long hs_tx_buf = 256UL;   179     void print_regs(struct udc *dev);   212     int udc_mask_unused_interrupts(struct udc *dev);   234     int udc_enable_ep0_interrupts(struct udc *dev);   251     int udc_enable_dev_setup_interrupts(struct udc *dev);   272     int udc_set_txfifo_addr(struct udc_ep *ep);   297     unsigned int cnak_pending = 0U;   299     void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned int num);   312     int udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc);   440     void ep_init(struct udc_regs *regs, struct udc_ep *ep);   482     int udc_ep_disable(struct usb_ep *usbep);   507     struct usb_request * udc_alloc_request(struct usb_ep *usbep, gfp_t gfp);   580     void udc_init_bna_dummy(struct udc_request *req);   617     void udc_txfifo_write(struct udc_ep *ep, struct usb_request *req);   653     int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords);   665     int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes);   690     int udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req);   727     int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp);   817     void complete_req(struct udc_ep *ep, struct udc_request *req, int sts);   877     struct udc_data_dma * udc_get_last_dma_desc(struct udc_request *req);   890     u32  udc_get_ppbdu_rxbytes(struct udc_request *req);  1032     void udc_set_rde(struct udc *dev);  1050     int udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp);  1251     int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq);  1306     int udc_set_halt(struct usb_ep *usbep, int halt___0);  1368     const struct usb_ep_ops udc_ep_ops = { &udc_ep_enable, &udc_ep_disable, &udc_alloc_request, &udc_free_request, &udc_queue, &udc_dequeue, &udc_set_halt, 0, 0, 0 };  1385     int udc_get_frame(struct usb_gadget *gadget);  1391     int udc_wakeup(struct usb_gadget *gadget);  1403     int amd5536_udc_start(struct usb_gadget *g, struct usb_gadget_driver *driver);  1405     int 
amd5536_udc_stop(struct usb_gadget *g);  1407     const struct usb_gadget_ops udc_ops = { &udc_get_frame, &udc_wakeup, 0, 0, 0, 0, 0, 0, &amd5536_udc_start, &amd5536_udc_stop };  1415     void make_ep_lists(struct udc *dev);  1436     int startup_registers(struct udc *dev);  1592     void usb_connect(struct udc *dev);  1610     void usb_disconnect(struct udc *dev);  1694     void udc_timer_function(unsigned long v);  1744     void udc_handle_halt_state(struct udc_ep *ep);  1774     void udc_pollstall_timer_function(unsigned long v);  1808     void activate_control_endpoints(struct udc *dev);  1907     int setup_ep0(struct udc *dev);  1949     void shutdown(struct udc *dev, struct usb_gadget_driver *driver);  1987     void udc_process_cnak_queue(struct udc *dev);  2019     void udc_ep0_set_rde(struct udc *dev);  2046     irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix);  2260     irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix);  2419     irqreturn_t udc_control_out_isr(struct udc *dev);  2635     irqreturn_t udc_control_in_isr(struct udc *dev);  2735     irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq);  2965     irqreturn_t udc_irq(int irq, void *pdev);  3019     void gadget_release(struct device *pdev);  3026     void udc_remove(struct udc *dev);  3086     int init_dma_pools(struct udc *dev);  3350     const struct pci_device_id __mod_pci__pci_id_device_table[2U] = {  };  3386     void ldv_check_return_value(int);  3389     void ldv_check_return_value_probe(int);  3392     void ldv_initialize();  3395     void ldv_handler_precall();  3398     int nondet_int();  3401     int LDV_IN_INTERRUPT = 0;  3404     void ldv_main0_sequence_infinite_withcheck_stateful();
          return ;
}
{
 3406     struct usb_ep *var_group1;
 3407     const struct usb_endpoint_descriptor *var_udc_ep_enable_6_p1;
 3408     unsigned int var_udc_alloc_request_9_p1;
 3409     struct usb_request *var_group2;
 3410     unsigned int var_udc_queue_23_p2;
 3411     int var_udc_set_halt_26_p1;
 3412     struct usb_gadget *var_group3;
 3413     struct usb_gadget_driver *var_group4;
 3414     struct pci_dev *var_group5;
 3415     const struct pci_device_id *var_udc_pci_probe_54_p1;
 3416     int res_udc_pci_probe_54;
 3417     int var_udc_irq_49_p0;
 3418     void *var_udc_irq_49_p1;
 3419     unsigned long var_udc_timer_function_37_p0;
 3420     unsigned long var_udc_pollstall_timer_function_39_p0;
 3421     int ldv_s_udc_pci_driver_pci_driver;
 3422     int tmp;
 3423     int tmp___0;
 3590     ldv_s_udc_pci_driver_pci_driver = 0;
 3576     LDV_IN_INTERRUPT = 1;
 3585     ldv_initialize() { /* Function call is skipped due to function is undefined */}
 3597     goto ldv_33371;
 3597     tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
 3600     goto ldv_33370;
 3598     ldv_33370:;
 3601     tmp = nondet_int() { /* Function call is skipped due to function is undefined */}
 3601     switch (tmp)
          {
 3146       struct udc *dev;
 3147       unsigned long resource;
 3148       unsigned long len;
 3149       int retval;
 3150       struct _ddebug descriptor;
 3151       long tmp;
 3152       void *tmp___0;
 3153       int tmp___1;
 3154       struct _ddebug descriptor___0;
 3155       long tmp___2;
 3156       struct resource *tmp___3;
 3157       struct _ddebug descriptor___1;
 3158       long tmp___4;
 3159       struct lock_class_key __key;
 3160       struct _ddebug descriptor___2;
 3161       long tmp___5;
 3162       int tmp___6;
 3163       int tmp___7;
 3152       retval = 0;
            {
 583          void *tmp;
              {}
 420            void *tmp___2;
 435            tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}
            }
 3161       dev = (struct udc *)tmp___0;
 3168       tmp___1 = pci_enable_device(pdev) { /* Function call is skipped due to function is undefined */}
 3174       dev->active = 1U;
 3177       resource = (unsigned long)(((pdev->resource)[0]).start);
 3178       unsigned long int __CPAchecker_TMP_0;
 3178       __CPAchecker_TMP_0 = (unsigned long)(((((pdev->resource)[0]).end) - (((pdev->resource)[0]).start)) + 1ULL);
 3178       len = __CPAchecker_TMP_0;
 3180       tmp___3 = __request_region(&iomem_resource, (resource_size_t )resource, (resource_size_t )len, (const char *)(&name), 0) { /* Function call is skipped due to function is undefined */}
 3187       dev->mem_region = 1U;
            {
 47           void *tmp;
              {}
 13             void *ptr;
 14             void *tmp;
 14             tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */}
 14             ptr = tmp;
 16             LDV_IO_MEMS = LDV_IO_MEMS + 1;
            }
 3190       unsigned long __CPAchecker_TMP_1 = (unsigned long)(dev->virt_addr);
 3206       __raw_spin_lock_init(&(dev->lock.__annonCompField19.rlock), "&(&dev->lock)->rlock", &__key) { /* Function call is skipped due to function is undefined */}
 3208       struct udc_csrs *__CPAchecker_TMP_2 = (struct udc_csrs *)(dev->virt_addr);
 3208       dev->csr = __CPAchecker_TMP_2 + 1280U;
 3210       struct udc_regs *__CPAchecker_TMP_3 = (struct udc_regs *)(dev->virt_addr);
 3210       dev->regs = __CPAchecker_TMP_3 + 1024U;
 3212       struct udc_ep_regs *__CPAchecker_TMP_4 = (struct udc_ep_regs *)(dev->virt_addr);
 3212       dev->ep_regs = __CPAchecker_TMP_4;
 3214       u32 *__CPAchecker_TMP_5 = (u32 *)(dev->virt_addr);
 3214       dev->rxfifo = __CPAchecker_TMP_5 + 2048U;
 3215       u32 *__CPAchecker_TMP_6 = (u32 *)(dev->virt_addr);
 3215       dev->txfifo = __CPAchecker_TMP_6 + 3072U;
            {
 135          int tmp;
 135          tmp = request_threaded_irq(irq, handler, (irqreturn_t (*)(int, void *))0, flags, name___0, dev) { /* Function call is skipped due to function is undefined */}
            }
 3218       descriptor___2.modname = "amd5536udc";
 3218       descriptor___2.function = "udc_pci_probe";
 3218       descriptor___2.filename = "/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.1-rc1.tar.xz--X--152_1a--X--cpachecker/linux-4.1-rc1.tar.xz/csd_deg_dscv/8673/dscv_tempdir/dscv/ri/152_1a/drivers/usb/gadget/udc/amd5536udc.c";
 3218       descriptor___2.format = "request_irq(%d) fail\n";
 3218       descriptor___2.lineno = 3218U;
 3218       descriptor___2.flags = 0U;
 3218       __dynamic_dev_dbg(&descriptor___2, (const struct device *)(&(pdev->dev)), "request_irq(%d) fail\n", pdev->irq) { /* Function call is skipped due to function is undefined */}
 3219       kfree((const void *)dev) { /* Function call is skipped due to function is undefined */}
 3220       dev = (struct udc *)0;
 3221       retval = -16;
 3222       goto finished;
          }
 3867     ldv_check_return_value(res_udc_pci_probe_54) { /* Function call is skipped due to function is undefined */}
 3868     ldv_check_return_value_probe(res_udc_pci_probe_54) { /* Function call is skipped due to function is undefined */}
 3870     goto ldv_module_exit;
}
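The defect this trace demonstrates (rule 152_1a): udc_pci_probe() allocates the udc structure, enables the PCI device, reserves BAR 0 with __request_region() and maps it via ldv_ioremap_nocache_1(), which the verifier models with ldv_io_mem_remap(); that is the step where LDV_IO_MEMS becomes 1. When request_irq() then fails, the error path only logs the failure, frees dev, clears the pointer, sets retval = -16 (-EBUSY) and jumps to finished. Because dev is already NULL at that label, nothing can ever pass dev->virt_addr to iounmap(), so the mapping leaks and ldv_check_final_state() ends in ldv_error().

A minimal reconstruction of the verifier model, assembled from the declarations and the assignment visible in the trace (only the increment appears explicitly above; the decrement in ldv_io_mem_unmap() is assumed by symmetry):

    void ldv_error(void);        /* declared elsewhere in the harness */
    void *ldv_undef_ptr(void);   /* nondeterministic pointer source   */

    /* Rule 152_1a sketch: count live I/O mappings and require the
     * counter to be back at zero in the final state. */
    static int LDV_IO_MEMS = 0;

    void *ldv_io_mem_remap(void *addr)
    {
            void *ptr = ldv_undef_ptr();    /* arbitrary mapped address */
            LDV_IO_MEMS = LDV_IO_MEMS + 1;  /* one more live mapping    */
            return ptr;
    }

    void ldv_io_mem_unmap(const volatile void *addr)
    {
            LDV_IO_MEMS = LDV_IO_MEMS - 1;  /* assumed: mapping released */
    }

    void ldv_check_final_state(void)
    {
            if (LDV_IO_MEMS != 0)   /* a mapping is still live...      */
                    ldv_error();    /* ...which is exactly this report */
    }

One way to repair the driver is to balance both the mapping and the reserved region on the failing request_irq() path, before dev is freed and NULLed; past that point the cleanup behind the finished label can no longer reach them. The sketch below uses the names seen in the trace and is not necessarily the patch applied upstream; the IRQF_SHARED flag in particular is an assumption:

    if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
            dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
            iounmap(dev->virt_addr);            /* balance the ioremap      */
            release_mem_region(resource, len);  /* balance __request_region */
            kfree(dev);
            dev = NULL;
            retval = -EBUSY;                    /* -16, as in the trace */
            goto finished;
    }

Source code

    1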
    2 /*
    3  * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
    4  *
    5  * Copyright (C) 2005-2007 AMD (http://www.amd.com)
    6  * Author: Thomas Dahlmann
    7  *
    8  * This program is free software; you can redistribute it and/or modify
    9  * it under the terms of the GNU General Public License as published by
   10  * the Free Software Foundation; either version 2 of the License, or
   11  * (at your option) any later version.
   12  */
   13 
   14 /*
   15  * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
   16  * It is a USB Highspeed DMA capable USB device controller. Beside ep0 it
   17  * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
   18  *
   19  * Make sure that UDC is assigned to port 4 by BIOS settings (port can also
   20  * be used as host port) and UOC bits PAD_EN and APU are set (should be done
   21  * by BIOS init).
   22  *
   23  * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
   24  * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0")
   25  * can be used with gadget ether.
   26  */
   27 
   28 /* debug control */
   29 /* #define UDC_VERBOSE */
   30 
   31 /* Driver strings */
   32 #define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
   33 #define UDC_DRIVER_VERSION_STRING	"01.00.0206"
   34 
   35 /* system */
   36 #include <linux/module.h>
   37 #include <linux/pci.h>
   38 #include <linux/kernel.h>
   39 #include <linux/delay.h>
   40 #include <linux/ioport.h>
   41 #include <linux/sched.h>
   42 #include <linux/slab.h>
   43 #include <linux/errno.h>
   44 #include <linux/timer.h>
   45 #include <linux/list.h>
   46 #include <linux/interrupt.h>
   47 #include <linux/ioctl.h>
   48 #include <linux/fs.h>
   49 #include <linux/dmapool.h>
   50 #include <linux/moduleparam.h>
   51 #include <linux/device.h>
   52 #include <linux/io.h>
   53 #include <linux/irq.h>
   54 #include <linux/prefetch.h>
   55 
   56 #include <asm/byteorder.h>
   57 #include <asm/unaligned.h>
   58 
   59 /* gadget stack */
   60 #include <linux/usb/ch9.h>
   61 #include <linux/usb/gadget.h>
   62 
   63 /* udc specific */
   64 #include "amd5536udc.h"
   65 
   66 
   67 static void udc_tasklet_disconnect(unsigned long);
   68 static void empty_req_queue(struct udc_ep *);
   69 static int udc_probe(struct udc *dev);
   70 static void udc_basic_init(struct udc *dev);
   71 static void udc_setup_endpoints(struct udc *dev);
   72 static void udc_soft_reset(struct udc *dev);
   73 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
   74 static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
   75 static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
   76 static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
   77 				unsigned long buf_len, gfp_t gfp_flags);
   78 static int udc_remote_wakeup(struct udc *dev);
   79 static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
   80 static void udc_pci_remove(struct pci_dev *pdev);
   81 
   82 /* description */
   83 static const char mod_desc[] = UDC_MOD_DESCRIPTION;
   84 static const char name[] = "amd5536udc";
   85 
   86 /* structure to hold endpoint function pointers */
   87 static const struct usb_ep_ops udc_ep_ops;
   88 
   89 /* received setup data */
   90 static union udc_setup_data setup_data;
   91 
   92 /* pointer to device object */
   93 static struct udc *udc;
   94 
   95 /* irq spin lock for soft reset */
   96 static DEFINE_SPINLOCK(udc_irq_spinlock);
   97 /* stall spin lock */
   98 static DEFINE_SPINLOCK(udc_stall_spinlock);
   99 
  100 /*
   101  * slave mode: pending bytes in rx fifo after nyet,
   102  * used if EPIN irq came but no req was available
  103 */
  104 static unsigned int udc_rxfifo_pending;
  105 
  106 /* count soft resets after suspend to avoid loop */
  107 static int soft_reset_occured;
  108 static int soft_reset_after_usbreset_occured;
  109 
  110 /* timer */
  111 static struct timer_list udc_timer;
  112 static int stop_timer;
  113 
  114 /* set_rde -- Is used to control enabling of RX DMA. Problem is
  115  * that UDC has only one bit (RDE) to enable/disable RX DMA for
  116  * all OUT endpoints. So we have to handle race conditions like
  117  * when OUT data reaches the fifo but no request was queued yet.
   118  * This cannot be solved by leaving RX DMA disabled until a
   119  * request gets queued, because there may be other OUT packets
   120  * in the FIFO (important for not blocking control traffic).
   121  * The value of set_rde controls the corresponding timer (sketch below).
   122  *
   123  * set_rde -1 == not used, means it is allowed to be set to 0 or 1
   124  * set_rde  0 == do not touch RDE, do not start the RDE timer
  125  * set_rde  1 == timer function will look whether FIFO has data
  126  * set_rde  2 == set by timer function to enable RX DMA on next call
  127  */
  128 static int set_rde = -1;
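
/*
 * Illustrative walk-through of the set_rde states documented above
 * (a reader's sketch, not part of the original driver; the real
 * transitions live in udc_queue() and udc_timer_function()):
 *
 *   -1: idle; udc_queue()/udc_set_rde() may program RDE directly
 *    0: udc_queue() handled RDE itself and forces a pending timer
 *       to expire without touching RDE again
 *    1: timer armed; on expiry the callback checks whether the
 *       RX FIFO still holds data and either re-arms or advances
 *    2: FIFO held data; the next expiry sets the RDE bit in
 *       dev->regs->ctl and returns the state to -1
 */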
  129 
  130 static DECLARE_COMPLETION(on_exit);
  131 static struct timer_list udc_pollstall_timer;
  132 static int stop_pollstall_timer;
  133 static DECLARE_COMPLETION(on_pollstall_exit);
  134 
  135 /* tasklet for usb disconnect */
  136 static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
  137 		(unsigned long) &udc);
  138 
  139 
  140 /* endpoint names used for print */
  141 static const char ep0_string[] = "ep0in";
  142 static const char *const ep_string[] = {
  143 	ep0_string,
  144 	"ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
  145 	"ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
  146 	"ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
  147 	"ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
  148 	"ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
  149 	"ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
  150 	"ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
  151 };
  152 
  153 /* DMA usage flag */
  154 static bool use_dma = 1;
  155 /* packet per buffer dma */
  156 static bool use_dma_ppb = 1;
  157 /* with per descr. update */
  158 static bool use_dma_ppb_du;
  159 /* buffer fill mode */
  160 static int use_dma_bufferfill_mode;
  161 /* full speed only mode */
  162 static bool use_fullspeed;
  163 /* tx buffer size for high speed */
  164 static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
  165 
  166 /* module parameters */
  167 module_param(use_dma, bool, S_IRUGO);
  168 MODULE_PARM_DESC(use_dma, "true for DMA");
  169 module_param(use_dma_ppb, bool, S_IRUGO);
  170 MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
  171 module_param(use_dma_ppb_du, bool, S_IRUGO);
  172 MODULE_PARM_DESC(use_dma_ppb_du,
  173 	"true for DMA in packet per buffer mode with descriptor update");
  174 module_param(use_fullspeed, bool, S_IRUGO);
  175 MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
  176 
  177 /*---------------------------------------------------------------------------*/
  178 /* Prints UDC device registers and endpoint irq registers */
  179 static void print_regs(struct udc *dev)
  180 {
  181 	DBG(dev, "------- Device registers -------\n");
  182 	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
  183 	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
  184 	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
  185 	DBG(dev, "\n");
  186 	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
  187 	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
  188 	DBG(dev, "\n");
  189 	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
  190 	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
  191 	DBG(dev, "\n");
  192 	DBG(dev, "USE DMA        = %d\n", use_dma);
  193 	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
  194 		DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
  195 			"WITHOUT desc. update)\n");
  196 		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
  197 	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
  198 		DBG(dev, "DMA mode       = PPBDU (packet per buffer "
  199 			"WITH desc. update)\n");
  200 		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
  201 	}
  202 	if (use_dma && use_dma_bufferfill_mode) {
  203 		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
  204 		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
  205 	}
  206 	if (!use_dma)
  207 		dev_info(&dev->pdev->dev, "FIFO mode\n");
  208 	DBG(dev, "-------------------------------------------------------\n");
  209 }
  210 
  211 /* Masks unused interrupts */
  212 static int udc_mask_unused_interrupts(struct udc *dev)
  213 {
  214 	u32 tmp;
  215 
  216 	/* mask all dev interrupts */
  217 	tmp =	AMD_BIT(UDC_DEVINT_SVC) |
  218 		AMD_BIT(UDC_DEVINT_ENUM) |
  219 		AMD_BIT(UDC_DEVINT_US) |
  220 		AMD_BIT(UDC_DEVINT_UR) |
  221 		AMD_BIT(UDC_DEVINT_ES) |
  222 		AMD_BIT(UDC_DEVINT_SI) |
  223 		AMD_BIT(UDC_DEVINT_SOF)|
  224 		AMD_BIT(UDC_DEVINT_SC);
  225 	writel(tmp, &dev->regs->irqmsk);
  226 
  227 	/* mask all ep interrupts */
  228 	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);
  229 
  230 	return 0;
  231 }
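
/*
 * For orientation: the AMD_* helpers used above come from
 * amd5536udc.h, which is not part of this listing. A minimal sketch
 * of their assumed shape, enough to read the register code:
 *
 *	#define AMD_BIT(bit)		(1 << (bit))
 *	#define AMD_UNMASK_BIT(bit)	(~AMD_BIT(bit))
 *	#define AMD_CLEAR_BIT(bit)	(~AMD_BIT(bit))
 *
 * AMD_ADDBITS(reg, val, FIELD) is assumed to insert val into the
 * bitfield described by FIELD's mask/offset constants, and
 * AMD_GETBITS(reg, FIELD) to extract it, i.e. roughly
 * (reg & FIELD_MASK) >> FIELD_OFS.
 */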
  232 
  233 /* Enables endpoint 0 interrupts */
  234 static int udc_enable_ep0_interrupts(struct udc *dev)
  235 {
  236 	u32 tmp;
  237 
  238 	DBG(dev, "udc_enable_ep0_interrupts()\n");
  239 
  240 	/* read irq mask */
  241 	tmp = readl(&dev->regs->ep_irqmsk);
  242 	/* enable ep0 irq's */
  243 	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
  244 		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
  245 	writel(tmp, &dev->regs->ep_irqmsk);
  246 
  247 	return 0;
  248 }
  249 
  250 /* Enables device interrupts for SET_INTF and SET_CONFIG */
  251 static int udc_enable_dev_setup_interrupts(struct udc *dev)
  252 {
  253 	u32 tmp;
  254 
  255 	DBG(dev, "enable device interrupts for setup data\n");
  256 
  257 	/* read irq mask */
  258 	tmp = readl(&dev->regs->irqmsk);
  259 
  260 	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
  261 	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
  262 		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
  263 		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
  264 		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
  265 		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
  266 	writel(tmp, &dev->regs->irqmsk);
  267 
  268 	return 0;
  269 }
  270 
  271 /* Calculates fifo start of endpoint based on preceding endpoints */
  272 static int udc_set_txfifo_addr(struct udc_ep *ep)
  273 {
  274 	struct udc	*dev;
  275 	u32 tmp;
  276 	int i;
  277 
  278 	if (!ep || !(ep->in))
  279 		return -EINVAL;
  280 
  281 	dev = ep->dev;
  282 	ep->txfifo = dev->txfifo;
  283 
  284 	/* traverse ep's */
  285 	for (i = 0; i < ep->num; i++) {
  286 		if (dev->ep[i].regs) {
  287 			/* read fifo size */
  288 			tmp = readl(&dev->ep[i].regs->bufin_framenum);
  289 			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
  290 			ep->txfifo += tmp;
  291 		}
  292 	}
  293 	return 0;
  294 }
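
/*
 * Worked example for the loop above (sizes hypothetical): if the
 * bufin_framenum fields of ep0..ep2 report 64, 128 and 128 fifo
 * entries, enabling ep3 yields
 *
 *	ep->txfifo = dev->txfifo + 64 + 128 + 128;
 *
 * so each IN endpoint's TX FIFO window starts directly after the
 * windows of all preceding endpoints.
 */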
  295 
  296 /* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
  297 static u32 cnak_pending;
  298 
  299 static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
  300 {
  301 	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
  302 		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
  303 		cnak_pending |= 1 << (num);
  304 		ep->naking = 1;
  305 	} else
  306 		cnak_pending = cnak_pending & (~(1 << (num)));
  307 }
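
/*
 * Example of the cnak_pending bitmap in action: a NAK that cannot be
 * cleared on ep2in leaves bit 2 set, one on ep0out (index 16) leaves
 * bit 16 set:
 *
 *	cnak_pending == (1 << 2) | (1 << 16)
 *
 * udc_process_cnak_queue() (defined later in the file) retries the
 * CNAK write for every endpoint whose bit is still set.
 */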
  308 
  309 
  310 /* Enables endpoint, is called by gadget driver */
  311 static int
  312 udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
  313 {
  314 	struct udc_ep		*ep;
  315 	struct udc		*dev;
  316 	u32			tmp;
  317 	unsigned long		iflags;
  318 	u8 udc_csr_epix;
  319 	unsigned		maxpacket;
  320 
   321 	if (!usbep
   322 			|| usbep->name == ep0_string
   323 			|| !desc
   324 			|| desc->bDescriptorType != USB_DT_ENDPOINT)
   325 		return -EINVAL;
  327 	ep = container_of(usbep, struct udc_ep, ep);
  328 	dev = ep->dev;
  329 
  330 	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);
  331 
  332 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
  333 		return -ESHUTDOWN;
  334 
  335 	spin_lock_irqsave(&dev->lock, iflags);
  336 	ep->ep.desc = desc;
  337 
  338 	ep->halted = 0;
  339 
  340 	/* set traffic type */
  341 	tmp = readl(&dev->ep[ep->num].regs->ctl);
  342 	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
  343 	writel(tmp, &dev->ep[ep->num].regs->ctl);
  344 
  345 	/* set max packet size */
  346 	maxpacket = usb_endpoint_maxp(desc);
  347 	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
  348 	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
  349 	ep->ep.maxpacket = maxpacket;
  350 	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);
  351 
  352 	/* IN ep */
  353 	if (ep->in) {
  354 
  355 		/* ep ix in UDC CSR register space */
  356 		udc_csr_epix = ep->num;
  357 
  358 		/* set buffer size (tx fifo entries) */
  359 		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
  360 		/* double buffering: fifo size = 2 x max packet size */
  361 		tmp = AMD_ADDBITS(
  362 				tmp,
  363 				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
  364 					  / UDC_DWORD_BYTES,
  365 				UDC_EPIN_BUFF_SIZE);
  366 		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);
  367 
  368 		/* calc. tx fifo base addr */
  369 		udc_set_txfifo_addr(ep);
  370 
  371 		/* flush fifo */
  372 		tmp = readl(&ep->regs->ctl);
  373 		tmp |= AMD_BIT(UDC_EPCTL_F);
  374 		writel(tmp, &ep->regs->ctl);
  375 
  376 	/* OUT ep */
  377 	} else {
  378 		/* ep ix in UDC CSR register space */
  379 		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
  380 
  381 		/* set max packet size UDC CSR	*/
  382 		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
  383 		tmp = AMD_ADDBITS(tmp, maxpacket,
  384 					UDC_CSR_NE_MAX_PKT);
  385 		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
  386 
  387 		if (use_dma && !ep->in) {
  388 			/* alloc and init BNA dummy request */
  389 			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
  390 			ep->bna_occurred = 0;
  391 		}
  392 
  393 		if (ep->num != UDC_EP0OUT_IX)
  394 			dev->data_ep_enabled = 1;
  395 	}
  396 
  397 	/* set ep values */
  398 	tmp = readl(&dev->csr->ne[udc_csr_epix]);
  399 	/* max packet */
  400 	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
  401 	/* ep number */
  402 	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
  403 	/* ep direction */
  404 	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
  405 	/* ep type */
  406 	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
  407 	/* ep config */
  408 	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
  409 	/* ep interface */
  410 	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
  411 	/* ep alt */
  412 	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
  413 	/* write reg */
  414 	writel(tmp, &dev->csr->ne[udc_csr_epix]);
  415 
  416 	/* enable ep irq */
  417 	tmp = readl(&dev->regs->ep_irqmsk);
  418 	tmp &= AMD_UNMASK_BIT(ep->num);
  419 	writel(tmp, &dev->regs->ep_irqmsk);
  420 
  421 	/*
  422 	 * clear NAK by writing CNAK
  423 	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
  424 	 */
  425 	if (!use_dma || ep->in) {
  426 		tmp = readl(&ep->regs->ctl);
  427 		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
  428 		writel(tmp, &ep->regs->ctl);
  429 		ep->naking = 0;
  430 		UDC_QUEUE_CNAK(ep, ep->num);
  431 	}
  432 	tmp = desc->bEndpointAddress;
  433 	DBG(dev, "%s enabled\n", usbep->name);
  434 
  435 	spin_unlock_irqrestore(&dev->lock, iflags);
  436 	return 0;
  437 }
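
/*
 * Usage sketch from the gadget-driver side (illustrative only; the
 * descriptor name and error handling are hypothetical). The call
 * reaches udc_ep_enable() above through udc_ep_ops.enable:
 *
 *	ep->desc = &bulk_in_desc;	// matching USB_DT_ENDPOINT descriptor
 *	ret = usb_ep_enable(ep);	// -> udc_ep_enable(ep, ep->desc)
 *	if (ret)
 *		return ret;
 */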
  438 
  439 /* Resets endpoint */
  440 static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
  441 {
  442 	u32		tmp;
  443 
  444 	VDBG(ep->dev, "ep-%d reset\n", ep->num);
  445 	ep->ep.desc = NULL;
  446 	ep->ep.ops = &udc_ep_ops;
  447 	INIT_LIST_HEAD(&ep->queue);
  448 
  449 	usb_ep_set_maxpacket_limit(&ep->ep,(u16) ~0);
  450 	/* set NAK */
  451 	tmp = readl(&ep->regs->ctl);
  452 	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
  453 	writel(tmp, &ep->regs->ctl);
  454 	ep->naking = 1;
  455 
  456 	/* disable interrupt */
   457 	tmp = readl(&regs->ep_irqmsk);
   458 	tmp |= AMD_BIT(ep->num);
   459 	writel(tmp, &regs->ep_irqmsk);
  460 
  461 	if (ep->in) {
  462 		/* unset P and IN bit of potential former DMA */
  463 		tmp = readl(&ep->regs->ctl);
  464 		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
  465 		writel(tmp, &ep->regs->ctl);
  466 
  467 		tmp = readl(&ep->regs->sts);
  468 		tmp |= AMD_BIT(UDC_EPSTS_IN);
  469 		writel(tmp, &ep->regs->sts);
  470 
  471 		/* flush the fifo */
  472 		tmp = readl(&ep->regs->ctl);
  473 		tmp |= AMD_BIT(UDC_EPCTL_F);
  474 		writel(tmp, &ep->regs->ctl);
  475 
  476 	}
  477 	/* reset desc pointer */
  478 	writel(0, &ep->regs->desptr);
  479 }
  480 
  481 /* Disables endpoint, is called by gadget driver */
  482 static int udc_ep_disable(struct usb_ep *usbep)
  483 {
  484 	struct udc_ep	*ep = NULL;
  485 	unsigned long	iflags;
  486 
  487 	if (!usbep)
  488 		return -EINVAL;
  489 
  490 	ep = container_of(usbep, struct udc_ep, ep);
  491 	if (usbep->name == ep0_string || !ep->ep.desc)
  492 		return -EINVAL;
  493 
  494 	DBG(ep->dev, "Disable ep-%d\n", ep->num);
  495 
  496 	spin_lock_irqsave(&ep->dev->lock, iflags);
  497 	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
  498 	empty_req_queue(ep);
  499 	ep_init(ep->dev->regs, ep);
  500 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
  501 
  502 	return 0;
  503 }
  504 
  505 /* Allocates request packet, called by gadget driver */
  506 static struct usb_request *
  507 udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
  508 {
  509 	struct udc_request	*req;
  510 	struct udc_data_dma	*dma_desc;
  511 	struct udc_ep	*ep;
  512 
  513 	if (!usbep)
  514 		return NULL;
  515 
  516 	ep = container_of(usbep, struct udc_ep, ep);
  517 
  518 	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
  519 	req = kzalloc(sizeof(struct udc_request), gfp);
  520 	if (!req)
  521 		return NULL;
  522 
  523 	req->req.dma = DMA_DONT_USE;
  524 	INIT_LIST_HEAD(&req->queue);
  525 
  526 	if (ep->dma) {
  527 		/* ep0 in requests are allocated from data pool here */
  528 		dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
  529 						&req->td_phys);
  530 		if (!dma_desc) {
  531 			kfree(req);
  532 			return NULL;
  533 		}
  534 
  535 		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
  536 				"td_phys = %lx\n",
  537 				req, dma_desc,
  538 				(unsigned long)req->td_phys);
  539 		/* prevent from using desc. - set HOST BUSY */
  540 		dma_desc->status = AMD_ADDBITS(dma_desc->status,
  541 						UDC_DMA_STP_STS_BS_HOST_BUSY,
  542 						UDC_DMA_STP_STS_BS);
  543 		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
  544 		req->td_data = dma_desc;
  545 		req->td_data_last = NULL;
  546 		req->chain_len = 1;
  547 	}
  548 
  549 	return &req->req;
  550 }
  551 
  552 /* Frees request packet, called by gadget driver */
  553 static void
  554 udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
  555 {
  556 	struct udc_ep	*ep;
  557 	struct udc_request	*req;
  558 
  559 	if (!usbep || !usbreq)
  560 		return;
  561 
  562 	ep = container_of(usbep, struct udc_ep, ep);
  563 	req = container_of(usbreq, struct udc_request, req);
  564 	VDBG(ep->dev, "free_req req=%p\n", req);
  565 	BUG_ON(!list_empty(&req->queue));
  566 	if (req->td_data) {
  567 		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
  568 
  569 		/* free dma chain if created */
  570 		if (req->chain_len > 1)
  571 			udc_free_dma_chain(ep->dev, req);
  572 
  573 		pci_pool_free(ep->dev->data_requests, req->td_data,
  574 							req->td_phys);
  575 	}
  576 	kfree(req);
  577 }
  578 
  579 /* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
  580 static void udc_init_bna_dummy(struct udc_request *req)
  581 {
  582 	if (req) {
  583 		/* set last bit */
  584 		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
  585 		/* set next pointer to itself */
  586 		req->td_data->next = req->td_phys;
  587 		/* set HOST BUSY */
  588 		req->td_data->status
  589 			= AMD_ADDBITS(req->td_data->status,
  590 					UDC_DMA_STP_STS_BS_DMA_DONE,
  591 					UDC_DMA_STP_STS_BS);
  592 #ifdef UDC_VERBOSE
  593 		pr_debug("bna desc = %p, sts = %08x\n",
  594 			req->td_data, req->td_data->status);
  595 #endif
  596 	}
  597 }
  598 
  599 /* Allocate BNA dummy descriptor */
  600 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
  601 {
  602 	struct udc_request *req = NULL;
  603 	struct usb_request *_req = NULL;
  604 
  605 	/* alloc the dummy request */
  606 	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
  607 	if (_req) {
  608 		req = container_of(_req, struct udc_request, req);
  609 		ep->bna_dummy_req = req;
  610 		udc_init_bna_dummy(req);
  611 	}
  612 	return req;
  613 }
  614 
  615 /* Write data to TX fifo for IN packets */
  616 static void
  617 udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
  618 {
  619 	u8			*req_buf;
  620 	u32			*buf;
  621 	int			i, j;
  622 	unsigned		bytes = 0;
  623 	unsigned		remaining = 0;
  624 
  625 	if (!req || !ep)
  626 		return;
  627 
  628 	req_buf = req->buf + req->actual;
  629 	prefetch(req_buf);
  630 	remaining = req->length - req->actual;
  631 
  632 	buf = (u32 *) req_buf;
  633 
  634 	bytes = ep->ep.maxpacket;
  635 	if (bytes > remaining)
  636 		bytes = remaining;
  637 
  638 	/* dwords first */
  639 	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
  640 		writel(*(buf + i), ep->txfifo);
  641 
  642 	/* remaining bytes must be written by byte access */
  643 	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
  644 		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
  645 							ep->txfifo);
  646 	}
  647 
  648 	/* dummy write confirm */
  649 	writel(0, &ep->regs->confirm);
  650 }
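
/*
 * Worked example for the dword/byte split above: with bytes = 13 and
 * UDC_DWORD_BYTES = 4, the first loop issues 13 / 4 = 3 dword writes
 * (12 bytes) and the second loop issues 13 % 4 = 1 single-byte write,
 * taking the low byte of the fourth dword (j = 0, shift 0); larger
 * remainders shift right by a further 8 bits per byte.
 */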
  651 
  652 /* Read dwords from RX fifo for OUT transfers */
  653 static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
  654 {
  655 	int i;
  656 
  657 	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);
  658 
  659 	for (i = 0; i < dwords; i++)
  660 		*(buf + i) = readl(dev->rxfifo);
  661 	return 0;
  662 }
  663 
  664 /* Read bytes from RX fifo for OUT transfers */
  665 static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
  666 {
  667 	int i, j;
  668 	u32 tmp;
  669 
  670 	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);
  671 
  672 	/* dwords first */
  673 	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
  674 		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
  675 
  676 	/* remaining bytes must be read by byte access */
  677 	if (bytes % UDC_DWORD_BYTES) {
  678 		tmp = readl(dev->rxfifo);
  679 		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
  680 			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
  681 			tmp = tmp >> UDC_BITS_PER_BYTE;
  682 		}
  683 	}
  684 
  685 	return 0;
  686 }
  687 
  688 /* Read data from RX fifo for OUT transfers */
  689 static int
  690 udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
  691 {
  692 	u8 *buf;
  693 	unsigned buf_space;
  694 	unsigned bytes = 0;
  695 	unsigned finished = 0;
  696 
   697 	/* number of received bytes */
  698 	bytes = readl(&ep->regs->sts);
  699 	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);
  700 
  701 	buf_space = req->req.length - req->req.actual;
  702 	buf = req->req.buf + req->req.actual;
  703 	if (bytes > buf_space) {
  704 		if ((buf_space % ep->ep.maxpacket) != 0) {
  705 			DBG(ep->dev,
   706 				"%s: rx %d bytes, rx-buf space = %d bytes\n",
  707 				ep->ep.name, bytes, buf_space);
  708 			req->req.status = -EOVERFLOW;
  709 		}
  710 		bytes = buf_space;
  711 	}
  712 	req->req.actual += bytes;
  713 
  714 	/* last packet ? */
  715 	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
  716 		|| ((req->req.actual == req->req.length) && !req->req.zero))
  717 		finished = 1;
  718 
  719 	/* read rx fifo bytes */
  720 	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
  721 	udc_rxfifo_read_bytes(ep->dev, buf, bytes);
  722 
  723 	return finished;
  724 }
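
/*
 * Completion cases for the "last packet?" test above, e.g. with
 * maxpacket = 512: a 100-byte packet (short packet), a zero-length
 * packet, or two full packets into a 1024-byte request with
 * req.zero unset (buffer exactly filled) all finish the transfer;
 * a full 512-byte packet into a half-filled larger buffer does not.
 */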
  725 
  726 /* create/re-init a DMA descriptor or a DMA descriptor chain */
  727 static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
  728 {
  729 	int	retval = 0;
  730 	u32	tmp;
  731 
  732 	VDBG(ep->dev, "prep_dma\n");
  733 	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
  734 			ep->num, req->td_data);
  735 
  736 	/* set buffer pointer */
  737 	req->td_data->bufptr = req->req.dma;
  738 
  739 	/* set last bit */
  740 	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
  741 
  742 	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
  743 	if (use_dma_ppb) {
  744 
  745 		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
  746 		if (retval != 0) {
  747 			if (retval == -ENOMEM)
  748 				DBG(ep->dev, "Out of DMA memory\n");
  749 			return retval;
  750 		}
  751 		if (ep->in) {
  752 			if (req->req.length == ep->ep.maxpacket) {
  753 				/* write tx bytes */
  754 				req->td_data->status =
  755 					AMD_ADDBITS(req->td_data->status,
  756 						ep->ep.maxpacket,
  757 						UDC_DMA_IN_STS_TXBYTES);
  758 
  759 			}
  760 		}
  761 
  762 	}
  763 
  764 	if (ep->in) {
  765 		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
  766 				"maxpacket=%d ep%d\n",
  767 				use_dma_ppb, req->req.length,
  768 				ep->ep.maxpacket, ep->num);
  769 		/*
  770 		 * if bytes < max packet then tx bytes must
  771 		 * be written in packet per buffer mode
  772 		 */
  773 		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
  774 				|| ep->num == UDC_EP0OUT_IX
  775 				|| ep->num == UDC_EP0IN_IX) {
  776 			/* write tx bytes */
  777 			req->td_data->status =
  778 				AMD_ADDBITS(req->td_data->status,
  779 						req->req.length,
  780 						UDC_DMA_IN_STS_TXBYTES);
  781 			/* reset frame num */
  782 			req->td_data->status =
  783 				AMD_ADDBITS(req->td_data->status,
  784 						0,
  785 						UDC_DMA_IN_STS_FRAMENUM);
  786 		}
  787 		/* set HOST BUSY */
  788 		req->td_data->status =
  789 			AMD_ADDBITS(req->td_data->status,
  790 				UDC_DMA_STP_STS_BS_HOST_BUSY,
  791 				UDC_DMA_STP_STS_BS);
  792 	} else {
  793 		VDBG(ep->dev, "OUT set host ready\n");
  794 		/* set HOST READY */
  795 		req->td_data->status =
  796 			AMD_ADDBITS(req->td_data->status,
  797 				UDC_DMA_STP_STS_BS_HOST_READY,
  798 				UDC_DMA_STP_STS_BS);
  799 
  800 
  801 			/* clear NAK by writing CNAK */
  802 			if (ep->naking) {
  803 				tmp = readl(&ep->regs->ctl);
  804 				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
  805 				writel(tmp, &ep->regs->ctl);
  806 				ep->naking = 0;
  807 				UDC_QUEUE_CNAK(ep, ep->num);
  808 			}
  809 
  810 	}
  811 
  812 	return retval;
  813 }
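
/*
 * Descriptor ownership as set up above, in brief: HOST_READY hands a
 * descriptor to the controller, HOST_BUSY keeps it with the CPU.
 * OUT descriptors are marked HOST_READY here; IN descriptors stay
 * HOST_BUSY and are flipped to HOST_READY in udc_queue() right before
 * the descriptor pointer is written, so the DMA engine never reads a
 * half-initialized chain.
 */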
  814 
  815 /* Completes request packet ... caller MUST hold lock */
  816 static void
  817 complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
  818 __releases(ep->dev->lock)
  819 __acquires(ep->dev->lock)
  820 {
  821 	struct udc		*dev;
  822 	unsigned		halted;
  823 
  824 	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);
  825 
  826 	dev = ep->dev;
  827 	/* unmap DMA */
  828 	if (ep->dma)
  829 		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);
  830 
  831 	halted = ep->halted;
  832 	ep->halted = 1;
  833 
  834 	/* set new status if pending */
  835 	if (req->req.status == -EINPROGRESS)
  836 		req->req.status = sts;
  837 
  838 	/* remove from ep queue */
  839 	list_del_init(&req->queue);
  840 
  841 	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
  842 		&req->req, req->req.length, ep->ep.name, sts);
  843 
  844 	spin_unlock(&dev->lock);
  845 	usb_gadget_giveback_request(&ep->ep, &req->req);
  846 	spin_lock(&dev->lock);
  847 	ep->halted = halted;
  848 }
  849 
  850 /* frees pci pool descriptors of a DMA chain */
  851 static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
  852 {
  853 
  854 	int ret_val = 0;
  855 	struct udc_data_dma	*td;
  856 	struct udc_data_dma	*td_last = NULL;
  857 	unsigned int i;
  858 
  859 	DBG(dev, "free chain req = %p\n", req);
  860 
  861 	/* do not free first desc., will be done by free for request */
  862 	td_last = req->td_data;
  863 	td = phys_to_virt(td_last->next);
  864 
  865 	for (i = 1; i < req->chain_len; i++) {
  866 
  867 		pci_pool_free(dev->data_requests, td,
  868 				(dma_addr_t) td_last->next);
  869 		td_last = td;
  870 		td = phys_to_virt(td_last->next);
  871 	}
  872 
  873 	return ret_val;
  874 }
  875 
  876 /* Iterates to the end of a DMA chain and returns last descriptor */
  877 static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
  878 {
  879 	struct udc_data_dma	*td;
  880 
  881 	td = req->td_data;
  882 	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
  883 		td = phys_to_virt(td->next);
  884 
  885 	return td;
  886 
  887 }
  888 
  889 /* Iterates to the end of a DMA chain and counts bytes received */
  890 static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
  891 {
  892 	struct udc_data_dma	*td;
  893 	u32 count;
  894 
  895 	td = req->td_data;
   896 	/* number of received bytes */
  897 	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);
  898 
  899 	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
  900 		td = phys_to_virt(td->next);
   901 		/* number of received bytes */
  902 		if (td) {
  903 			count += AMD_GETBITS(td->status,
  904 				UDC_DMA_OUT_STS_RXBYTES);
  905 		}
  906 	}
  907 
  908 	return count;
  909 
  910 }
  911 
  912 /* Creates or re-inits a DMA chain */
  913 static int udc_create_dma_chain(
  914 	struct udc_ep *ep,
  915 	struct udc_request *req,
  916 	unsigned long buf_len, gfp_t gfp_flags
  917 )
  918 {
  919 	unsigned long bytes = req->req.length;
  920 	unsigned int i;
  921 	dma_addr_t dma_addr;
  922 	struct udc_data_dma	*td = NULL;
  923 	struct udc_data_dma	*last = NULL;
  924 	unsigned long txbytes;
  925 	unsigned create_new_chain = 0;
  926 	unsigned len;
  927 
  928 	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
  929 			bytes, buf_len);
  930 	dma_addr = DMA_DONT_USE;
  931 
  932 	/* unset L bit in first desc for OUT */
  933 	if (!ep->in)
  934 		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
  935 
  936 	/* alloc only new desc's if not already available */
  937 	len = req->req.length / ep->ep.maxpacket;
  938 	if (req->req.length % ep->ep.maxpacket)
  939 		len++;
  940 
  941 	if (len > req->chain_len) {
  942 		/* shorter chain already allocated before */
  943 		if (req->chain_len > 1)
  944 			udc_free_dma_chain(ep->dev, req);
  945 		req->chain_len = len;
  946 		create_new_chain = 1;
  947 	}
  948 
  949 	td = req->td_data;
  950 	/* gen. required number of descriptors and buffers */
  951 	for (i = buf_len; i < bytes; i += buf_len) {
  952 		/* create or determine next desc. */
  953 		if (create_new_chain) {
  954 
  955 			td = pci_pool_alloc(ep->dev->data_requests,
  956 					gfp_flags, &dma_addr);
  957 			if (!td)
  958 				return -ENOMEM;
  959 
  960 			td->status = 0;
  961 		} else if (i == buf_len) {
  962 			/* first td */
  963 			td = (struct udc_data_dma *) phys_to_virt(
  964 						req->td_data->next);
  965 			td->status = 0;
  966 		} else {
  967 			td = (struct udc_data_dma *) phys_to_virt(last->next);
  968 			td->status = 0;
  969 		}
  970 
  971 
  972 		if (td)
  973 			td->bufptr = req->req.dma + i; /* assign buffer */
  974 		else
  975 			break;
  976 
  977 		/* short packet ? */
  978 		if ((bytes - i) >= buf_len) {
  979 			txbytes = buf_len;
  980 		} else {
  981 			/* short packet */
  982 			txbytes = bytes - i;
  983 		}
  984 
  985 		/* link td and assign tx bytes */
  986 		if (i == buf_len) {
  987 			if (create_new_chain)
  988 				req->td_data->next = dma_addr;
  989 			/*
  990 			else
  991 				req->td_data->next = virt_to_phys(td);
  992 			*/
  993 			/* write tx bytes */
  994 			if (ep->in) {
  995 				/* first desc */
  996 				req->td_data->status =
  997 					AMD_ADDBITS(req->td_data->status,
  998 							ep->ep.maxpacket,
  999 							UDC_DMA_IN_STS_TXBYTES);
 1000 				/* second desc */
 1001 				td->status = AMD_ADDBITS(td->status,
 1002 							txbytes,
 1003 							UDC_DMA_IN_STS_TXBYTES);
 1004 			}
 1005 		} else {
 1006 			if (create_new_chain)
 1007 				last->next = dma_addr;
 1008 			/*
 1009 			else
 1010 				last->next = virt_to_phys(td);
 1011 			*/
 1012 			if (ep->in) {
 1013 				/* write tx bytes */
 1014 				td->status = AMD_ADDBITS(td->status,
 1015 							txbytes,
 1016 							UDC_DMA_IN_STS_TXBYTES);
 1017 			}
 1018 		}
 1019 		last = td;
 1020 	}
 1021 	/* set last bit */
 1022 	if (td) {
 1023 		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
 1024 		/* last desc. points to itself */
 1025 		req->td_data_last = td;
 1026 	}
 1027 
 1028 	return 0;
 1029 }
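
/*
 * Worked example for the chain sizing above: req.req.length = 1025
 * with ep.maxpacket = 512 gives len = 2 plus 1 for the remainder,
 * i.e. a 3-descriptor chain. If the request previously used a
 * 2-descriptor chain it is freed and rebuilt (create_new_chain = 1);
 * an existing chain of 3 or more descriptors is re-initialized in
 * place by walking the already-linked next pointers.
 */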
 1030 
 1031 /* Enabling RX DMA */
 1032 static void udc_set_rde(struct udc *dev)
 1033 {
 1034 	u32 tmp;
 1035 
 1036 	VDBG(dev, "udc_set_rde()\n");
 1037 	/* stop RDE timer */
 1038 	if (timer_pending(&udc_timer)) {
 1039 		set_rde = 0;
 1040 		mod_timer(&udc_timer, jiffies - 1);
 1041 	}
 1042 	/* set RDE */
 1043 	tmp = readl(&dev->regs->ctl);
 1044 	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
 1045 	writel(tmp, &dev->regs->ctl);
 1046 }
 1047 
 1048 /* Queues a request packet, called by gadget driver */
 1049 static int
 1050 udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
 1051 {
 1052 	int			retval = 0;
 1053 	u8			open_rxfifo = 0;
 1054 	unsigned long		iflags;
 1055 	struct udc_ep		*ep;
 1056 	struct udc_request	*req;
 1057 	struct udc		*dev;
 1058 	u32			tmp;
 1059 
 1060 	/* check the inputs */
 1061 	req = container_of(usbreq, struct udc_request, req);
 1062 
 1063 	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
 1064 			|| !list_empty(&req->queue))
 1065 		return -EINVAL;
 1066 
 1067 	ep = container_of(usbep, struct udc_ep, ep);
 1068 	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
 1069 		return -EINVAL;
 1070 
 1071 	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
 1072 	dev = ep->dev;
 1073 
 1074 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 1075 		return -ESHUTDOWN;
 1076 
 1077 	/* map dma (usually done before) */
 1078 	if (ep->dma) {
 1079 		VDBG(dev, "DMA map req %p\n", req);
 1080 		retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
 1081 		if (retval)
 1082 			return retval;
 1083 	}
 1084 
 1085 	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
 1086 			usbep->name, usbreq, usbreq->length,
 1087 			req->td_data, usbreq->buf);
 1088 
 1089 	spin_lock_irqsave(&dev->lock, iflags);
 1090 	usbreq->actual = 0;
 1091 	usbreq->status = -EINPROGRESS;
 1092 	req->dma_done = 0;
 1093 
 1094 	/* on empty queue just do first transfer */
 1095 	if (list_empty(&ep->queue)) {
 1096 		/* zlp */
 1097 		if (usbreq->length == 0) {
 1098 			/* IN zlp's are handled by hardware */
 1099 			complete_req(ep, req, 0);
 1100 			VDBG(dev, "%s: zlp\n", ep->ep.name);
 1101 			/*
 1102 			 * if set_config or set_intf is waiting for ack by zlp
 1103 			 * then set CSR_DONE
 1104 			 */
 1105 			if (dev->set_cfg_not_acked) {
 1106 				tmp = readl(&dev->regs->ctl);
 1107 				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
 1108 				writel(tmp, &dev->regs->ctl);
 1109 				dev->set_cfg_not_acked = 0;
 1110 			}
 1111 			/* setup command is ACK'ed now by zlp */
 1112 			if (dev->waiting_zlp_ack_ep0in) {
 1113 				/* clear NAK by writing CNAK in EP0_IN */
 1114 				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 1115 				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 1116 				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 1117 				dev->ep[UDC_EP0IN_IX].naking = 0;
 1118 				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
 1119 							UDC_EP0IN_IX);
 1120 				dev->waiting_zlp_ack_ep0in = 0;
 1121 			}
 1122 			goto finished;
 1123 		}
 1124 		if (ep->dma) {
 1125 			retval = prep_dma(ep, req, GFP_ATOMIC);
 1126 			if (retval != 0)
 1127 				goto finished;
 1128 			/* write desc pointer to enable DMA */
 1129 			if (ep->in) {
 1130 				/* set HOST READY */
 1131 				req->td_data->status =
 1132 					AMD_ADDBITS(req->td_data->status,
 1133 						UDC_DMA_IN_STS_BS_HOST_READY,
 1134 						UDC_DMA_IN_STS_BS);
 1135 			}
 1136 
  1137 			/* disable rx dma while the descriptor update is in progress */
 1138 			if (!ep->in) {
 1139 				/* stop RDE timer */
 1140 				if (timer_pending(&udc_timer)) {
 1141 					set_rde = 0;
 1142 					mod_timer(&udc_timer, jiffies - 1);
 1143 				}
 1144 				/* clear RDE */
 1145 				tmp = readl(&dev->regs->ctl);
 1146 				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
 1147 				writel(tmp, &dev->regs->ctl);
 1148 				open_rxfifo = 1;
 1149 
 1150 				/*
 1151 				 * if BNA occurred then let BNA dummy desc.
 1152 				 * point to current desc.
 1153 				 */
 1154 				if (ep->bna_occurred) {
 1155 					VDBG(dev, "copy to BNA dummy desc.\n");
 1156 					memcpy(ep->bna_dummy_req->td_data,
 1157 						req->td_data,
 1158 						sizeof(struct udc_data_dma));
 1159 				}
 1160 			}
 1161 			/* write desc pointer */
 1162 			writel(req->td_phys, &ep->regs->desptr);
 1163 
 1164 			/* clear NAK by writing CNAK */
 1165 			if (ep->naking) {
 1166 				tmp = readl(&ep->regs->ctl);
 1167 				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 1168 				writel(tmp, &ep->regs->ctl);
 1169 				ep->naking = 0;
 1170 				UDC_QUEUE_CNAK(ep, ep->num);
 1171 			}
 1172 
 1173 			if (ep->in) {
 1174 				/* enable ep irq */
 1175 				tmp = readl(&dev->regs->ep_irqmsk);
 1176 				tmp &= AMD_UNMASK_BIT(ep->num);
 1177 				writel(tmp, &dev->regs->ep_irqmsk);
 1178 			}
 1179 		} else if (ep->in) {
 1180 				/* enable ep irq */
 1181 				tmp = readl(&dev->regs->ep_irqmsk);
 1182 				tmp &= AMD_UNMASK_BIT(ep->num);
 1183 				writel(tmp, &dev->regs->ep_irqmsk);
 1184 			}
 1185 
 1186 	} else if (ep->dma) {
 1187 
 1188 		/*
  1189 		 * prep_dma is not used for OUT eps here; that is not
  1190 		 * possible in PPB modes for chain-creation reasons
 1191 		 */
 1192 		if (ep->in) {
 1193 			retval = prep_dma(ep, req, GFP_ATOMIC);
 1194 			if (retval != 0)
 1195 				goto finished;
 1196 		}
 1197 	}
 1198 	VDBG(dev, "list_add\n");
 1199 	/* add request to ep queue */
 1200 	if (req) {
 1201 
 1202 		list_add_tail(&req->queue, &ep->queue);
 1203 
 1204 		/* open rxfifo if out data queued */
 1205 		if (open_rxfifo) {
 1206 			/* enable DMA */
 1207 			req->dma_going = 1;
 1208 			udc_set_rde(dev);
 1209 			if (ep->num != UDC_EP0OUT_IX)
 1210 				dev->data_ep_queued = 1;
 1211 		}
 1212 		/* stop OUT naking */
 1213 		if (!ep->in) {
 1214 			if (!use_dma && udc_rxfifo_pending) {
 1215 				DBG(dev, "udc_queue(): pending bytes in "
 1216 					"rxfifo after nyet\n");
 1217 				/*
  1218 				 * read pending bytes after nyet:
 1219 				 * referring to isr
 1220 				 */
 1221 				if (udc_rxfifo_read(ep, req)) {
 1222 					/* finish */
 1223 					complete_req(ep, req, 0);
 1224 				}
 1225 				udc_rxfifo_pending = 0;
 1226 
 1227 			}
 1228 		}
 1229 	}
 1230 
 1231 finished:
 1232 	spin_unlock_irqrestore(&dev->lock, iflags);
 1233 	return retval;
 1234 }
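
/*
 * Usage sketch (illustrative; buffer, length and callback names are
 * hypothetical): a gadget driver ends up in udc_alloc_request() and
 * udc_queue() above via the standard gadget API:
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;	// invoked from complete_req()
 *	ret = usb_ep_queue(ep, req, GFP_ATOMIC);
 */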
 1235 
 1236 /* Empty request queue of an endpoint; caller holds spinlock */
 1237 static void empty_req_queue(struct udc_ep *ep)
 1238 {
 1239 	struct udc_request	*req;
 1240 
 1241 	ep->halted = 1;
 1242 	while (!list_empty(&ep->queue)) {
 1243 		req = list_entry(ep->queue.next,
 1244 			struct udc_request,
 1245 			queue);
 1246 		complete_req(ep, req, -ESHUTDOWN);
 1247 	}
 1248 }
 1249 
 1250 /* Dequeues a request packet, called by gadget driver */
 1251 static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
 1252 {
 1253 	struct udc_ep		*ep;
 1254 	struct udc_request	*req;
 1255 	unsigned		halted;
 1256 	unsigned long		iflags;
 1257 
 1258 	ep = container_of(usbep, struct udc_ep, ep);
 1259 	if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
 1260 				&& ep->num != UDC_EP0OUT_IX)))
 1261 		return -EINVAL;
 1262 
 1263 	req = container_of(usbreq, struct udc_request, req);
 1264 
 1265 	spin_lock_irqsave(&ep->dev->lock, iflags);
 1266 	halted = ep->halted;
 1267 	ep->halted = 1;
 1268 	/* request in processing or next one */
 1269 	if (ep->queue.next == &req->queue) {
 1270 		if (ep->dma && req->dma_going) {
 1271 			if (ep->in)
 1272 				ep->cancel_transfer = 1;
 1273 			else {
 1274 				u32 tmp;
 1275 				u32 dma_sts;
 1276 				/* stop potential receive DMA */
 1277 				tmp = readl(&udc->regs->ctl);
 1278 				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
 1279 							&udc->regs->ctl);
 1280 				/*
 1281 				 * Cancel transfer later in ISR
 1282 				 * if descriptor was touched.
 1283 				 */
 1284 				dma_sts = AMD_GETBITS(req->td_data->status,
 1285 							UDC_DMA_OUT_STS_BS);
 1286 				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
 1287 					ep->cancel_transfer = 1;
 1288 				else {
 1289 					udc_init_bna_dummy(ep->req);
 1290 					writel(ep->bna_dummy_req->td_phys,
 1291 						&ep->regs->desptr);
 1292 				}
 1293 				writel(tmp, &udc->regs->ctl);
 1294 			}
 1295 		}
 1296 	}
 1297 	complete_req(ep, req, -ECONNRESET);
 1298 	ep->halted = halted;
 1299 
 1300 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
 1301 	return 0;
 1302 }
 1303 
 1304 /* Halt or clear halt of endpoint */
 1305 static int
 1306 udc_set_halt(struct usb_ep *usbep, int halt)
 1307 {
 1308 	struct udc_ep	*ep;
 1309 	u32 tmp;
 1310 	unsigned long iflags;
 1311 	int retval = 0;
 1312 
 1313 	if (!usbep)
 1314 		return -EINVAL;
 1315 
 1316 	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);
 1317 
 1318 	ep = container_of(usbep, struct udc_ep, ep);
 1319 	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
 1320 		return -EINVAL;
 1321 	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
 1322 		return -ESHUTDOWN;
 1323 
 1324 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
 1325 	/* halt or clear halt */
 1326 	if (halt) {
 1327 		if (ep->num == 0)
 1328 			ep->dev->stall_ep0in = 1;
 1329 		else {
 1330 			/*
 1331 			 * set STALL
  1332 			 * rxfifo empty is not taken into account
 1333 			 */
 1334 			tmp = readl(&ep->regs->ctl);
 1335 			tmp |= AMD_BIT(UDC_EPCTL_S);
 1336 			writel(tmp, &ep->regs->ctl);
 1337 			ep->halted = 1;
 1338 
 1339 			/* setup poll timer */
 1340 			if (!timer_pending(&udc_pollstall_timer)) {
 1341 				udc_pollstall_timer.expires = jiffies +
 1342 					HZ * UDC_POLLSTALL_TIMER_USECONDS
 1343 					/ (1000 * 1000);
 1344 				if (!stop_pollstall_timer) {
 1345 					DBG(ep->dev, "start polltimer\n");
 1346 					add_timer(&udc_pollstall_timer);
 1347 				}
 1348 			}
 1349 		}
 1350 	} else {
 1351 		/* ep is halted by set_halt() before */
 1352 		if (ep->halted) {
 1353 			tmp = readl(&ep->regs->ctl);
 1354 			/* clear stall bit */
 1355 			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
 1356 			/* clear NAK by writing CNAK */
 1357 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 1358 			writel(tmp, &ep->regs->ctl);
 1359 			ep->halted = 0;
 1360 			UDC_QUEUE_CNAK(ep, ep->num);
 1361 		}
 1362 	}
 1363 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
 1364 	return retval;
 1365 }
 1366 
 1367 /* gadget interface */
 1368 static const struct usb_ep_ops udc_ep_ops = {
 1369 	.enable		= udc_ep_enable,
 1370 	.disable	= udc_ep_disable,
 1371 
 1372 	.alloc_request	= udc_alloc_request,
 1373 	.free_request	= udc_free_request,
 1374 
 1375 	.queue		= udc_queue,
 1376 	.dequeue	= udc_dequeue,
 1377 
 1378 	.set_halt	= udc_set_halt,
 1379 	/* fifo ops not implemented */
 1380 };
 1381 
 1382 /*-------------------------------------------------------------------------*/
 1383 
 1384 /* Get frame counter (not implemented) */
 1385 static int udc_get_frame(struct usb_gadget *gadget)
 1386 {
 1387 	return -EOPNOTSUPP;
 1388 }
 1389 
 1390 /* Remote wakeup gadget interface */
 1391 static int udc_wakeup(struct usb_gadget *gadget)
 1392 {
 1393 	struct udc		*dev;
 1394 
 1395 	if (!gadget)
 1396 		return -EINVAL;
 1397 	dev = container_of(gadget, struct udc, gadget);
 1398 	udc_remote_wakeup(dev);
 1399 
 1400 	return 0;
 1401 }
 1402 
 1403 static int amd5536_udc_start(struct usb_gadget *g,
 1404 		struct usb_gadget_driver *driver);
 1405 static int amd5536_udc_stop(struct usb_gadget *g);
 1406 
 1407 static const struct usb_gadget_ops udc_ops = {
 1408 	.wakeup		= udc_wakeup,
 1409 	.get_frame	= udc_get_frame,
 1410 	.udc_start	= amd5536_udc_start,
 1411 	.udc_stop	= amd5536_udc_stop,
 1412 };
 1413 
  1414 /* Sets up endpoint parameters, adds endpoints to linked list */
 1415 static void make_ep_lists(struct udc *dev)
 1416 {
 1417 	/* make gadget ep lists */
 1418 	INIT_LIST_HEAD(&dev->gadget.ep_list);
 1419 	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
 1420 						&dev->gadget.ep_list);
 1421 	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
 1422 						&dev->gadget.ep_list);
 1423 	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
 1424 						&dev->gadget.ep_list);
 1425 
 1426 	/* fifo config */
 1427 	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
 1428 	if (dev->gadget.speed == USB_SPEED_FULL)
 1429 		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
 1430 	else if (dev->gadget.speed == USB_SPEED_HIGH)
 1431 		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
 1432 	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
 1433 }
 1434 
 1435 /* init registers at driver load time */
 1436 static int startup_registers(struct udc *dev)
 1437 {
 1438 	u32 tmp;
 1439 
 1440 	/* init controller by soft reset */
 1441 	udc_soft_reset(dev);
 1442 
 1443 	/* mask not needed interrupts */
 1444 	udc_mask_unused_interrupts(dev);
 1445 
 1446 	/* put into initial config */
 1447 	udc_basic_init(dev);
 1448 	/* link up all endpoints */
 1449 	udc_setup_endpoints(dev);
 1450 
 1451 	/* program speed */
 1452 	tmp = readl(&dev->regs->cfg);
 1453 	if (use_fullspeed)
 1454 		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
 1455 	else
 1456 		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
 1457 	writel(tmp, &dev->regs->cfg);
 1458 
 1459 	return 0;
 1460 }
 1461 
 1462 /* Inits UDC context */
 1463 static void udc_basic_init(struct udc *dev)
 1464 {
 1465 	u32	tmp;
 1466 
 1467 	DBG(dev, "udc_basic_init()\n");
 1468 
 1469 	dev->gadget.speed = USB_SPEED_UNKNOWN;
 1470 
 1471 	/* stop RDE timer */
 1472 	if (timer_pending(&udc_timer)) {
 1473 		set_rde = 0;
 1474 		mod_timer(&udc_timer, jiffies - 1);
 1475 	}
 1476 	/* stop poll stall timer */
 1477 	if (timer_pending(&udc_pollstall_timer))
 1478 		mod_timer(&udc_pollstall_timer, jiffies - 1);
 1479 	/* disable DMA */
 1480 	tmp = readl(&dev->regs->ctl);
 1481 	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
 1482 	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
 1483 	writel(tmp, &dev->regs->ctl);
 1484 
 1485 	/* enable dynamic CSR programming */
 1486 	tmp = readl(&dev->regs->cfg);
 1487 	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
 1488 	/* set self powered */
 1489 	tmp |= AMD_BIT(UDC_DEVCFG_SP);
 1490 	/* set remote wakeupable */
 1491 	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
 1492 	writel(tmp, &dev->regs->cfg);
 1493 
 1494 	make_ep_lists(dev);
 1495 
 1496 	dev->data_ep_enabled = 0;
 1497 	dev->data_ep_queued = 0;
 1498 }
 1499 
 1500 /* Sets initial endpoint parameters */
 1501 static void udc_setup_endpoints(struct udc *dev)
 1502 {
 1503 	struct udc_ep	*ep;
 1504 	u32	tmp;
 1505 	u32	reg;
 1506 
 1507 	DBG(dev, "udc_setup_endpoints()\n");
 1508 
 1509 	/* read enum speed */
 1510 	tmp = readl(&dev->regs->sts);
 1511 	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
 1512 	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
 1513 		dev->gadget.speed = USB_SPEED_HIGH;
 1514 	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
 1515 		dev->gadget.speed = USB_SPEED_FULL;
 1516 
 1517 	/* set basic ep parameters */
 1518 	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
 1519 		ep = &dev->ep[tmp];
 1520 		ep->dev = dev;
 1521 		ep->ep.name = ep_string[tmp];
 1522 		ep->num = tmp;
 1523 		/* txfifo size is calculated at enable time */
 1524 		ep->txfifo = dev->txfifo;
 1525 
 1526 		/* fifo size */
 1527 		if (tmp < UDC_EPIN_NUM) {
 1528 			ep->fifo_depth = UDC_TXFIFO_SIZE;
 1529 			ep->in = 1;
 1530 		} else {
 1531 			ep->fifo_depth = UDC_RXFIFO_SIZE;
 1532 			ep->in = 0;
 1533 
 1534 		}
 1535 		ep->regs = &dev->ep_regs[tmp];
 1536 		/*
 1537 		 * ep will be reset only if ep was not enabled before to avoid
 1538 		 * disabling ep interrupts when ENUM interrupt occurs but ep is
 1539 		 * not enabled by gadget driver
 1540 		 */
 1541 		if (!ep->ep.desc)
 1542 			ep_init(dev->regs, ep);
 1543 
 1544 		if (use_dma) {
 1545 			/*
 1546 			 * ep->dma is not really used, just to indicate that
 1547 			 * DMA is active: remove this
 1548 			 * dma regs = dev control regs
 1549 			 */
 1550 			ep->dma = &dev->regs->ctl;
 1551 
 1552 			/* nak OUT endpoints until enable - not for ep0 */
 1553 			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
 1554 						&& tmp > UDC_EPIN_NUM) {
 1555 				/* set NAK */
 1556 				reg = readl(&dev->ep[tmp].regs->ctl);
 1557 				reg |= AMD_BIT(UDC_EPCTL_SNAK);
 1558 				writel(reg, &dev->ep[tmp].regs->ctl);
 1559 				dev->ep[tmp].naking = 1;
 1560 
 1561 			}
 1562 		}
 1563 	}
 1564 	/* EP0 max packet */
 1565 	if (dev->gadget.speed == USB_SPEED_FULL) {
 1566 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
 1567 					   UDC_FS_EP0IN_MAX_PKT_SIZE);
 1568 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
 1569 					   UDC_FS_EP0OUT_MAX_PKT_SIZE);
 1570 	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
 1571 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
 1572 					   UDC_EP0IN_MAX_PKT_SIZE);
 1573 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
 1574 					   UDC_EP0OUT_MAX_PKT_SIZE);
 1575 	}
 1576 
 1577 	/*
 1578 	 * with suspend bug workaround, ep0 params for gadget driver
 1579 	 * are set at gadget driver bind() call
 1580 	 */
 1581 	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
 1582 	dev->ep[UDC_EP0IN_IX].halted = 0;
 1583 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
 1584 
 1585 	/* init cfg/alt/int */
 1586 	dev->cur_config = 0;
 1587 	dev->cur_intf = 0;
 1588 	dev->cur_alt = 0;
 1589 }
 1590 
 1591 /* Bringup after Connect event, initial bringup to be ready for ep0 events */
 1592 static void usb_connect(struct udc *dev)
 1593 {
 1594 
 1595 	dev_info(&dev->pdev->dev, "USB Connect\n");
 1596 
 1597 	dev->connected = 1;
 1598 
 1599 	/* put into initial config */
 1600 	udc_basic_init(dev);
 1601 
 1602 	/* enable device setup interrupts */
 1603 	udc_enable_dev_setup_interrupts(dev);
 1604 }
 1605 
 1606 /*
  1607  * Calls the gadget with a disconnect event, resets the UDC, and does
  1608  * the initial bringup so it is ready for ep0 events again
 1609  */
 1610 static void usb_disconnect(struct udc *dev)
 1611 {
 1612 
 1613 	dev_info(&dev->pdev->dev, "USB Disconnect\n");
 1614 
 1615 	dev->connected = 0;
 1616 
 1617 	/* mask interrupts */
 1618 	udc_mask_unused_interrupts(dev);
 1619 
 1620 	/* REVISIT there doesn't seem to be a point to having this
 1621 	 * talk to a tasklet ... do it directly, we already hold
 1622 	 * the spinlock needed to process the disconnect.
 1623 	 */
 1624 
 1625 	tasklet_schedule(&disconnect_tasklet);
 1626 }
 1627 
 1628 /* Tasklet for disconnect to be outside of interrupt context */
 1629 static void udc_tasklet_disconnect(unsigned long par)
 1630 {
 1631 	struct udc *dev = (struct udc *)(*((struct udc **) par));
 1632 	u32 tmp;
 1633 
 1634 	DBG(dev, "Tasklet disconnect\n");
 1635 	spin_lock_irq(&dev->lock);
 1636 
 1637 	if (dev->driver) {
 1638 		spin_unlock(&dev->lock);
 1639 		dev->driver->disconnect(&dev->gadget);
 1640 		spin_lock(&dev->lock);
 1641 
 1642 		/* empty queues */
 1643 		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
 1644 			empty_req_queue(&dev->ep[tmp]);
 1645 
 1646 	}
 1647 
 1648 	/* disable ep0 */
 1649 	ep_init(dev->regs,
 1650 			&dev->ep[UDC_EP0IN_IX]);
 1651 
 1652 
 1653 	if (!soft_reset_occured) {
 1654 		/* init controller by soft reset */
 1655 		udc_soft_reset(dev);
 1656 		soft_reset_occured++;
 1657 	}
 1658 
 1659 	/* re-enable dev interrupts */
 1660 	udc_enable_dev_setup_interrupts(dev);
 1661 	/* back to full speed ? */
 1662 	if (use_fullspeed) {
 1663 		tmp = readl(&dev->regs->cfg);
 1664 		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
 1665 		writel(tmp, &dev->regs->cfg);
 1666 	}
 1667 
 1668 	spin_unlock_irq(&dev->lock);
 1669 }
 1670 
 1671 /* Reset the UDC core */
 1672 static void udc_soft_reset(struct udc *dev)
 1673 {
 1674 	unsigned long	flags;
 1675 
 1676 	DBG(dev, "Soft reset\n");
 1677 	/*
 1678 	 * reset possible waiting interrupts, because int.
 1679 	 * status is lost after soft reset,
 1680 	 * ep int. status reset
 1681 	 */
 1682 	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
 1683 	/* device int. status reset */
 1684 	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
 1685 
 1686 	spin_lock_irqsave(&udc_irq_spinlock, flags);
 1687 	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
 1688 	readl(&dev->regs->cfg);
 1689 	spin_unlock_irqrestore(&udc_irq_spinlock, flags);
 1690 
 1691 }
 1692 
 1693 /* RDE timer callback to set RDE bit */
 1694 static void udc_timer_function(unsigned long v)
 1695 {
 1696 	u32 tmp;
 1697 
 1698 	spin_lock_irq(&udc_irq_spinlock);
 1699 
 1700 	if (set_rde > 0) {
 1701 		/*
  1702 		 * conditionally open the fifo if it was already filled
  1703 		 * on the last timer call
 1704 		 */
 1705 		if (set_rde > 1) {
 1706 			/* set RDE to receive setup data */
 1707 			tmp = readl(&udc->regs->ctl);
 1708 			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
 1709 			writel(tmp, &udc->regs->ctl);
 1710 			set_rde = -1;
 1711 		} else if (readl(&udc->regs->sts)
 1712 				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
 1713 			/*
  1714 			 * if the fifo is empty, set up polling instead of
  1715 			 * just opening the fifo
 1716 			 */
 1717 			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
 1718 			if (!stop_timer)
 1719 				add_timer(&udc_timer);
 1720 		} else {
 1721 			/*
  1722 			 * fifo contains data now: set up a timer to open
  1723 			 * the fifo on expiry so that setup packets can be
  1724 			 * received; when data packets get queued by the
  1725 			 * gadget layer, the timer is forced to expire with
 1726 			 * set_rde=0 (RDE is set in udc_queue())
 1727 			 */
 1728 			set_rde++;
 1729 			/* debug: lhadmot_timer_start = 221070 */
 1730 			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
 1731 			if (!stop_timer)
 1732 				add_timer(&udc_timer);
 1733 		}
 1734 
 1735 	} else
 1736 		set_rde = -1; /* RDE was set by udc_queue() */
 1737 	spin_unlock_irq(&udc_irq_spinlock);
 1738 	if (stop_timer)
 1739 		complete(&on_exit);
 1740 
 1741 }
 1742 
 1743 /* Handle halt state, used in stall poll timer */
 1744 static void udc_handle_halt_state(struct udc_ep *ep)
 1745 {
 1746 	u32 tmp;
  1747 	/* while ep is still halted, check whether STALL was cleared */
 1748 	if (ep->halted == 1) {
 1749 		tmp = readl(&ep->regs->ctl);
 1750 		/* STALL cleared ? */
 1751 		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
 1752 			/*
 1753 			 * FIXME: MSC spec requires that stall remains
  1754 			 * even on receiving CLEAR_FEATURE HALT. So
 1755 			 * we would set STALL again here to be compliant.
 1756 			 * But with current mass storage drivers this does
 1757 			 * not work (would produce endless host retries).
 1758 			 * So we clear halt on CLEAR_FEATURE.
 1759 			 *
 1760 			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
 1761 			tmp |= AMD_BIT(UDC_EPCTL_S);
 1762 			writel(tmp, &ep->regs->ctl);*/
 1763 
 1764 			/* clear NAK by writing CNAK */
 1765 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 1766 			writel(tmp, &ep->regs->ctl);
 1767 			ep->halted = 0;
 1768 			UDC_QUEUE_CNAK(ep, ep->num);
 1769 		}
 1770 	}
 1771 }
 1772 
  1773 /* Stall poll timer callback: polls the S bit and re-arms while halted */
 1774 static void udc_pollstall_timer_function(unsigned long v)
 1775 {
 1776 	struct udc_ep *ep;
 1777 	int halted = 0;
 1778 
 1779 	spin_lock_irq(&udc_stall_spinlock);
 1780 	/*
 1781 	 * only one IN and OUT endpoints are handled
 1782 	 * IN poll stall
 1783 	 */
 1784 	ep = &udc->ep[UDC_EPIN_IX];
 1785 	udc_handle_halt_state(ep);
 1786 	if (ep->halted)
 1787 		halted = 1;
 1788 	/* OUT poll stall */
 1789 	ep = &udc->ep[UDC_EPOUT_IX];
 1790 	udc_handle_halt_state(ep);
 1791 	if (ep->halted)
 1792 		halted = 1;
 1793 
 1794 	/* setup timer again when still halted */
 1795 	if (!stop_pollstall_timer && halted) {
 1796 		udc_pollstall_timer.expires = jiffies +
 1797 					HZ * UDC_POLLSTALL_TIMER_USECONDS
 1798 					/ (1000 * 1000);
 1799 		add_timer(&udc_pollstall_timer);
 1800 	}
 1801 	spin_unlock_irq(&udc_stall_spinlock);
 1802 
 1803 	if (stop_pollstall_timer)
 1804 		complete(&on_pollstall_exit);
 1805 }
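/*
 * The expiry arithmetic above converts microseconds to jiffies:
 * HZ * UDC_POLLSTALL_TIMER_USECONDS / 10^6. Integer division rounds
 * down; with illustrative values of HZ = 250 and a 500 us interval
 * the result is 0, i.e. the timer fires on the very next tick.
 */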
 1806 
 1807 /* Inits endpoint 0 so that SETUP packets are processed */
 1808 static void activate_control_endpoints(struct udc *dev)
 1809 {
 1810 	u32 tmp;
 1811 
 1812 	DBG(dev, "activate_control_endpoints\n");
 1813 
 1814 	/* flush fifo */
 1815 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 1816 	tmp |= AMD_BIT(UDC_EPCTL_F);
 1817 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 1818 
 1819 	/* set ep0 directions */
 1820 	dev->ep[UDC_EP0IN_IX].in = 1;
 1821 	dev->ep[UDC_EP0OUT_IX].in = 0;
 1822 
 1823 	/* set buffer size (tx fifo entries) of EP0_IN */
 1824 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
 1825 	if (dev->gadget.speed == USB_SPEED_FULL)
 1826 		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
 1827 					UDC_EPIN_BUFF_SIZE);
 1828 	else if (dev->gadget.speed == USB_SPEED_HIGH)
 1829 		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
 1830 					UDC_EPIN_BUFF_SIZE);
 1831 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
 1832 
 1833 	/* set max packet size of EP0_IN */
 1834 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
 1835 	if (dev->gadget.speed == USB_SPEED_FULL)
 1836 		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
 1837 					UDC_EP_MAX_PKT_SIZE);
 1838 	else if (dev->gadget.speed == USB_SPEED_HIGH)
 1839 		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
 1840 				UDC_EP_MAX_PKT_SIZE);
 1841 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
 1842 
 1843 	/* set max packet size of EP0_OUT */
 1844 	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
 1845 	if (dev->gadget.speed == USB_SPEED_FULL)
 1846 		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
 1847 					UDC_EP_MAX_PKT_SIZE);
 1848 	else if (dev->gadget.speed == USB_SPEED_HIGH)
 1849 		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
 1850 					UDC_EP_MAX_PKT_SIZE);
 1851 	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
 1852 
 1853 	/* set max packet size of EP0 in UDC CSR */
 1854 	tmp = readl(&dev->csr->ne[0]);
 1855 	if (dev->gadget.speed == USB_SPEED_FULL)
 1856 		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
 1857 					UDC_CSR_NE_MAX_PKT);
 1858 	else if (dev->gadget.speed == USB_SPEED_HIGH)
 1859 		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
 1860 					UDC_CSR_NE_MAX_PKT);
 1861 	writel(tmp, &dev->csr->ne[0]);
 1862 
 1863 	if (use_dma) {
 1864 		dev->ep[UDC_EP0OUT_IX].td->status |=
 1865 			AMD_BIT(UDC_DMA_OUT_STS_L);
 1866 		/* write dma desc address */
 1867 		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
 1868 			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
 1869 		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
 1870 			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
 1871 		/* stop RDE timer */
 1872 		if (timer_pending(&udc_timer)) {
 1873 			set_rde = 0;
 1874 			mod_timer(&udc_timer, jiffies - 1);
 1875 		}
 1876 		/* stop pollstall timer */
 1877 		if (timer_pending(&udc_pollstall_timer))
 1878 			mod_timer(&udc_pollstall_timer, jiffies - 1);
 1879 		/* enable DMA */
 1880 		tmp = readl(&dev->regs->ctl);
 1881 		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
 1882 				| AMD_BIT(UDC_DEVCTL_RDE)
 1883 				| AMD_BIT(UDC_DEVCTL_TDE);
 1884 		if (use_dma_bufferfill_mode)
 1885 			tmp |= AMD_BIT(UDC_DEVCTL_BF);
 1886 		else if (use_dma_ppb_du)
 1887 			tmp |= AMD_BIT(UDC_DEVCTL_DU);
 1888 		writel(tmp, &dev->regs->ctl);
 1889 	}
 1890 
 1891 	/* clear NAK by writing CNAK for EP0IN */
 1892 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 1893 	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 1894 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 1895 	dev->ep[UDC_EP0IN_IX].naking = 0;
 1896 	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
 1897 
 1898 	/* clear NAK by writing CNAK for EP0OUT */
 1899 	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
 1900 	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 1901 	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
 1902 	dev->ep[UDC_EP0OUT_IX].naking = 0;
 1903 	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
 1904 }
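/*
 * The speed checks above pick full-speed or high-speed buffer and
 * packet sizes for ep0 (constants defined elsewhere in the driver).
 * This matches USB 2.0, where a high-speed control endpoint always
 * uses 64-byte packets while full speed permits smaller ones.
 */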
 1905 
 1906 /* Make endpoint 0 ready for control traffic */
 1907 static int setup_ep0(struct udc *dev)
 1908 {
 1909 	activate_control_endpoints(dev);
 1910 	/* enable ep0 interrupts */
 1911 	udc_enable_ep0_interrupts(dev);
 1912 	/* enable device setup interrupts */
 1913 	udc_enable_dev_setup_interrupts(dev);
 1914 
 1915 	return 0;
 1916 }
 1917 
 1918 /* Called by gadget driver to register itself */
 1919 static int amd5536_udc_start(struct usb_gadget *g,
 1920 		struct usb_gadget_driver *driver)
 1921 {
 1922 	struct udc *dev = to_amd5536_udc(g);
 1923 	u32 tmp;
 1924 
 1925 	driver->driver.bus = NULL;
 1926 	dev->driver = driver;
 1927 
 1928 	/* Some gadget drivers use both ep0 directions.
 1929 	 * NOTE: to the gadget driver, ep0 is just one endpoint...
 1930 	 */
 1931 	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
 1932 		dev->ep[UDC_EP0IN_IX].ep.driver_data;
 1933 
 1934 	/* get ready for ep0 traffic */
 1935 	setup_ep0(dev);
 1936 
 1937 	/* clear SD */
 1938 	tmp = readl(&dev->regs->ctl);
 1939 	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
 1940 	writel(tmp, &dev->regs->ctl);
 1941 
 1942 	usb_connect(dev);
 1943 
 1944 	return 0;
 1945 }
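/*
 * Clearing SD (presumably the soft-disconnect control bit) makes the
 * device visible on the bus once a gadget driver is bound;
 * amd5536_udc_stop() below sets SD again to disconnect.
 */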
 1946 
 1947 /* shutdown requests and disconnect from gadget */
 1948 static void
 1949 shutdown(struct udc *dev, struct usb_gadget_driver *driver)
 1950 __releases(dev->lock)
 1951 __acquires(dev->lock)
 1952 {
 1953 	int tmp;
 1954 
 1955 	/* empty queues and init hardware */
 1956 	udc_basic_init(dev);
 1957 
 1958 	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
 1959 		empty_req_queue(&dev->ep[tmp]);
 1960 
 1961 	udc_setup_endpoints(dev);
 1962 }
 1963 
 1964 /* Called by gadget driver to unregister itself */
 1965 static int amd5536_udc_stop(struct usb_gadget *g)
 1966 {
 1967 	struct udc *dev = to_amd5536_udc(g);
 1968 	unsigned long flags;
 1969 	u32 tmp;
 1970 
 1971 	spin_lock_irqsave(&dev->lock, flags);
 1972 	udc_mask_unused_interrupts(dev);
 1973 	shutdown(dev, NULL);
 1974 	spin_unlock_irqrestore(&dev->lock, flags);
 1975 
 1976 	dev->driver = NULL;
 1977 
 1978 	/* set SD */
 1979 	tmp = readl(&dev->regs->ctl);
 1980 	tmp |= AMD_BIT(UDC_DEVCTL_SD);
 1981 	writel(tmp, &dev->regs->ctl);
 1982 
 1983 	return 0;
 1984 }
 1985 
 1986 /* Clear pending NAK bits */
 1987 static void udc_process_cnak_queue(struct udc *dev)
 1988 {
 1989 	u32 tmp;
 1990 	u32 reg;
 1991 
 1992 	/* check IN endpoints */
 1993 	DBG(dev, "CNAK pending queue processing\n");
 1994 	for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
 1995 		if (cnak_pending & (1 << tmp)) {
 1996 			DBG(dev, "CNAK pending for ep%d\n", tmp);
 1997 			/* clear NAK by writing CNAK */
 1998 			reg = readl(&dev->ep[tmp].regs->ctl);
 1999 			reg |= AMD_BIT(UDC_EPCTL_CNAK);
 2000 			writel(reg, &dev->ep[tmp].regs->ctl);
 2001 			dev->ep[tmp].naking = 0;
 2002 			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
 2003 		}
 2004 	}
 2005 	/* ... and ep0out */
 2006 	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
 2007 		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
 2008 		/* clear NAK by writing CNAK */
 2009 		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
 2010 		reg |= AMD_BIT(UDC_EPCTL_CNAK);
 2011 		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
 2012 		dev->ep[UDC_EP0OUT_IX].naking = 0;
 2013 		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
 2014 				dev->ep[UDC_EP0OUT_IX].num);
 2015 	}
 2016 }
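/*
 * cnak_pending is a bitmask with one bit per endpoint index; the
 * bookkeeping is hidden in the UDC_QUEUE_CNAK() macro (defined in the
 * driver header), which presumably records endpoints that are still
 * naking so this function can retry the CNAK write later.
 */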
 2017 
 2018 /* Enabling RX DMA after setup packet */
 2019 static void udc_ep0_set_rde(struct udc *dev)
 2020 {
 2021 	if (use_dma) {
 2022 		/*
 2023 		 * only enable RXDMA when no data endpoint is enabled
 2024 		 * or data is queued
 2025 		 */
 2026 		if (!dev->data_ep_enabled || dev->data_ep_queued) {
 2027 			udc_set_rde(dev);
 2028 		} else {
 2029 			/*
 2030 			 * set up timer for enabling RDE (so as not to enable
 2031 			 * RXFIFO DMA for data endpoints too early)
 2032 			 */
 2033 			if (set_rde != 0 && !timer_pending(&udc_timer)) {
 2034 				udc_timer.expires =
 2035 					jiffies + HZ/UDC_RDE_TIMER_DIV;
 2036 				set_rde = 1;
 2037 				if (!stop_timer)
 2038 					add_timer(&udc_timer);
 2039 			}
 2040 		}
 2041 	}
 2042 }
 2043 
 2044 
 2045 /* Interrupt handler for data OUT traffic */
 2046 static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
 2047 {
 2048 	irqreturn_t		ret_val = IRQ_NONE;
 2049 	u32			tmp;
 2050 	struct udc_ep		*ep;
 2051 	struct udc_request	*req;
 2052 	unsigned int		count;
 2053 	struct udc_data_dma	*td = NULL;
 2054 	unsigned		dma_done;
 2055 
 2056 	VDBG(dev, "ep%d irq\n", ep_ix);
 2057 	ep = &dev->ep[ep_ix];
 2058 
 2059 	tmp = readl(&ep->regs->sts);
 2060 	if (use_dma) {
 2061 		/* BNA event ? */
 2062 		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
 2063 			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
 2064 					ep->num, readl(&ep->regs->desptr));
 2065 			/* clear BNA */
 2066 			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
 2067 			if (!ep->cancel_transfer)
 2068 				ep->bna_occurred = 1;
 2069 			else
 2070 				ep->cancel_transfer = 0;
 2071 			ret_val = IRQ_HANDLED;
 2072 			goto finished;
 2073 		}
 2074 	}
 2075 	/* HE event ? */
 2076 	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
 2077 		dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
 2078 
 2079 		/* clear HE */
 2080 		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
 2081 		ret_val = IRQ_HANDLED;
 2082 		goto finished;
 2083 	}
 2084 
 2085 	if (!list_empty(&ep->queue)) {
 2086 
 2087 		/* next request */
 2088 		req = list_entry(ep->queue.next,
 2089 			struct udc_request, queue);
 2090 	} else {
 2091 		req = NULL;
 2092 		udc_rxfifo_pending = 1;
 2093 	}
 2094 	VDBG(dev, "req = %p\n", req);
 2095 	/* fifo mode */
 2096 	if (!use_dma) {
 2097 
 2098 		/* read fifo */
 2099 		if (req && udc_rxfifo_read(ep, req)) {
 2100 			ret_val = IRQ_HANDLED;
 2101 
 2102 			/* finish */
 2103 			complete_req(ep, req, 0);
 2104 			/* next request */
 2105 			if (!list_empty(&ep->queue) && !ep->halted) {
 2106 				req = list_entry(ep->queue.next,
 2107 					struct udc_request, queue);
 2108 			} else
 2109 				req = NULL;
 2110 		}
 2111 
 2112 	/* DMA */
 2113 	} else if (!ep->cancel_transfer && req != NULL) {
 2114 		ret_val = IRQ_HANDLED;
 2115 
 2116 		/* check for DMA done */
 2117 		if (!use_dma_ppb) {
 2118 			dma_done = AMD_GETBITS(req->td_data->status,
 2119 						UDC_DMA_OUT_STS_BS);
 2120 		/* packet per buffer mode - rx bytes */
 2121 		} else {
 2122 			/*
 2123 			 * if BNA occurred then recover desc. from
 2124 			 * BNA dummy desc.
 2125 			 */
 2126 			if (ep->bna_occurred) {
 2127 				VDBG(dev, "Recover desc. from BNA dummy\n");
 2128 				memcpy(req->td_data, ep->bna_dummy_req->td_data,
 2129 						sizeof(struct udc_data_dma));
 2130 				ep->bna_occurred = 0;
 2131 				udc_init_bna_dummy(ep->req);
 2132 			}
 2133 			td = udc_get_last_dma_desc(req);
 2134 			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
 2135 		}
 2136 		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
 2137 			/* buffer fill mode - rx bytes */
 2138 			if (!use_dma_ppb) {
 2139 				/* number of received bytes */
 2140 				count = AMD_GETBITS(req->td_data->status,
 2141 						UDC_DMA_OUT_STS_RXBYTES);
 2142 				VDBG(dev, "rx bytes=%u\n", count);
 2143 			/* packet per buffer mode - rx bytes */
 2144 			} else {
 2145 				VDBG(dev, "req->td_data=%p\n", req->td_data);
 2146 				VDBG(dev, "last desc = %p\n", td);
 2147 				/* number of received bytes */
 2148 				if (use_dma_ppb_du) {
 2149 					/* every desc. counts bytes */
 2150 					count = udc_get_ppbdu_rxbytes(req);
 2151 				} else {
 2152 					/* last desc. counts bytes */
 2153 					count = AMD_GETBITS(td->status,
 2154 						UDC_DMA_OUT_STS_RXBYTES);
 2155 					if (!count && req->req.length
 2156 						== UDC_DMA_MAXPACKET) {
 2157 						/*
 2158 						 * on 64k packets the RXBYTES
 2159 						 * field is zero
 2160 						 */
 2161 						count = UDC_DMA_MAXPACKET;
 2162 					}
 2163 				}
 2164 				VDBG(dev, "last desc rx bytes=%u\n", count);
 2165 			}
 2166 
 2167 			tmp = req->req.length - req->req.actual;
 2168 			if (count > tmp) {
 2169 				if ((tmp % ep->ep.maxpacket) != 0) {
 2170 					DBG(dev, "%s: rx %db, space=%db\n",
 2171 						ep->ep.name, count, tmp);
 2172 					req->req.status = -EOVERFLOW;
 2173 				}
 2174 				count = tmp;
 2175 			}
 2176 			req->req.actual += count;
 2177 			req->dma_going = 0;
 2178 			/* complete request */
 2179 			complete_req(ep, req, 0);
 2180 
 2181 			/* next request */
 2182 			if (!list_empty(&ep->queue) && !ep->halted) {
 2183 				req = list_entry(ep->queue.next,
 2184 					struct udc_request,
 2185 					queue);
 2186 				/*
 2187 				 * DMA may already have been started by
 2188 				 * udc_queue(), called from the gadget driver's
 2189 				 * completion routine. This happens when the
 2190 				 * queue holds only one request.
 2191 				 */
 2192 				if (req->dma_going == 0) {
 2193 					/* next dma */
 2194 					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
 2195 						goto finished;
 2196 					/* write desc pointer */
 2197 					writel(req->td_phys,
 2198 						&ep->regs->desptr);
 2199 					req->dma_going = 1;
 2200 					/* enable DMA */
 2201 					udc_set_rde(dev);
 2202 				}
 2203 			} else {
 2204 				/*
 2205 				 * implant BNA dummy descriptor to allow
 2206 				 * RXFIFO opening by RDE
 2207 				 */
 2208 				if (ep->bna_dummy_req) {
 2209 					/* write desc pointer */
 2210 					writel(ep->bna_dummy_req->td_phys,
 2211 						&ep->regs->desptr);
 2212 					ep->bna_occurred = 0;
 2213 				}
 2214 
 2215 				/*
 2216 				 * schedule timer for setting RDE if queue
 2217 				 * remains empty, to allow ep0 packets to pass
 2218 				 * through
 2219 				 */
 2220 				if (set_rde != 0
 2221 						&& !timer_pending(&udc_timer)) {
 2222 					udc_timer.expires =
 2223 						jiffies
 2224 						+ HZ*UDC_RDE_TIMER_SECONDS;
 2225 					set_rde = 1;
 2226 					if (!stop_timer)
 2227 						add_timer(&udc_timer);
 2228 				}
 2229 				if (ep->num != UDC_EP0OUT_IX)
 2230 					dev->data_ep_queued = 0;
 2231 			}
 2232 
 2233 		} else {
 2234 			/*
 2235 			 * RX DMA must be re-enabled for each desc in PPBDU mode
 2236 			 * and must be enabled in PPBNDU mode in case of BNA
 2237 			 */
 2238 			udc_set_rde(dev);
 2239 		}
 2240 
 2241 	} else if (ep->cancel_transfer) {
 2242 		ret_val = IRQ_HANDLED;
 2243 		ep->cancel_transfer = 0;
 2244 	}
 2245 
 2246 	/* check pending CNAKS */
 2247 	if (cnak_pending) {
 2248 		/* CNAK processing only when the rxfifo is empty */
 2249 		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
 2250 			udc_process_cnak_queue(dev);
 2251 	}
 2252 
 2253 	/* clear OUT bits in ep status */
 2254 	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
 2255 finished:
 2256 	return ret_val;
 2257 }
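/*
 * Three RX DMA variants are handled above: buffer fill mode (byte
 * count taken from the first descriptor's RXBYTES field), packet per
 * buffer with descriptor update (use_dma_ppb_du: every descriptor
 * carries its own byte count), and packet per buffer without update
 * (byte count taken from the last descriptor, with the special case
 * that RXBYTES reads as zero for 64k transfers).
 */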
 2258 
 2259 /* Interrupt handler for data IN traffic */
 2260 static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
 2261 {
 2262 	irqreturn_t ret_val = IRQ_NONE;
 2263 	u32 tmp;
 2264 	u32 epsts;
 2265 	struct udc_ep *ep;
 2266 	struct udc_request *req;
 2267 	struct udc_data_dma *td;
 2268 	unsigned dma_done;
 2269 	unsigned len;
 2270 
 2271 	ep = &dev->ep[ep_ix];
 2272 
 2273 	epsts = readl(&ep->regs->sts);
 2274 	if (use_dma) {
 2275 		/* BNA ? */
 2276 		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
 2277 			dev_err(&dev->pdev->dev,
 2278 				"BNA ep%din occurred - DESPTR = %08lx\n",
 2279 				ep->num,
 2280 				(unsigned long) readl(&ep->regs->desptr));
 2281 
 2282 			/* clear BNA */
 2283 			writel(epsts, &ep->regs->sts);
 2284 			ret_val = IRQ_HANDLED;
 2285 			goto finished;
 2286 		}
 2287 	}
 2288 	/* HE event ? */
 2289 	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
 2290 		dev_err(&dev->pdev->dev,
 2291 			"HE ep%din occurred - DESPTR = %08lx\n",
 2292 			ep->num, (unsigned long) readl(&ep->regs->desptr));
 2293 
 2294 		/* clear HE */
 2295 		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
 2296 		ret_val = IRQ_HANDLED;
 2297 		goto finished;
 2298 	}
 2299 
 2300 	/* DMA completion */
 2301 	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
 2302 		VDBG(dev, "TDC set - completion\n");
 2303 		ret_val = IRQ_HANDLED;
 2304 		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
 2305 			req = list_entry(ep->queue.next,
 2306 					struct udc_request, queue);
 2307 			/*
 2308 			 * length bytes transferred
 2309 			 * check dma done of last desc. in PPBDU mode
 2310 			 */
 2311 			if (use_dma_ppb_du) {
 2312 				td = udc_get_last_dma_desc(req);
 2313 				if (td) {
 2314 					dma_done =
 2315 						AMD_GETBITS(td->status,
 2316 						UDC_DMA_IN_STS_BS);
 2317 					/* DMA done state not evaluated here */
 2318 					req->req.actual = req->req.length;
 2319 				}
 2320 			} else {
 2321 				/* assume all bytes transferred */
 2322 				req->req.actual = req->req.length;
 2323 			}
 2324 
 2325 			if (req->req.actual == req->req.length) {
 2326 				/* complete req */
 2327 				complete_req(ep, req, 0);
 2328 				req->dma_going = 0;
 2329 				/* further request available ? */
 2330 				if (list_empty(&ep->queue)) {
 2331 					/* disable interrupt */
 2332 					tmp = readl(&dev->regs->ep_irqmsk);
 2333 					tmp |= AMD_BIT(ep->num);
 2334 					writel(tmp, &dev->regs->ep_irqmsk);
 2335 				}
 2336 			}
 2337 		}
 2338 		ep->cancel_transfer = 0;
 2339 
 2340 	}
 2341 	/*
 2342 	 * status reg has IN bit set and TDC not set (if TDC was handled,
 2343 	 * IN must not be handled; UDC defect?)
 2344 	 */
 2345 	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
 2346 			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
 2347 		ret_val = IRQ_HANDLED;
 2348 		if (!list_empty(&ep->queue)) {
 2349 			/* next request */
 2350 			req = list_entry(ep->queue.next,
 2351 					struct udc_request, queue);
 2352 			/* FIFO mode */
 2353 			if (!use_dma) {
 2354 				/* write fifo */
 2355 				udc_txfifo_write(ep, &req->req);
 2356 				len = req->req.length - req->req.actual;
 2357 				if (len > ep->ep.maxpacket)
 2358 					len = ep->ep.maxpacket;
 2359 				req->req.actual += len;
 2360 				if (req->req.actual == req->req.length
 2361 					|| (len != ep->ep.maxpacket)) {
 2362 					/* complete req */
 2363 					complete_req(ep, req, 0);
 2364 				}
 2365 			/* DMA */
 2366 			} else if (req && !req->dma_going) {
 2367 				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
 2368 					req, req->td_data);
 2369 				if (req->td_data) {
 2370 
 2371 					req->dma_going = 1;
 2372 
 2373 					/*
 2374 					 * unset L bit of first desc.
 2375 					 * for chain
 2376 					 */
 2377 					if (use_dma_ppb && req->req.length >
 2378 							ep->ep.maxpacket) {
 2379 						req->td_data->status &=
 2380 							AMD_CLEAR_BIT(
 2381 							UDC_DMA_IN_STS_L);
 2382 					}
 2383 
 2384 					/* write desc pointer */
 2385 					writel(req->td_phys, &ep->regs->desptr);
 2386 
 2387 					/* set HOST READY */
 2388 					req->td_data->status =
 2389 						AMD_ADDBITS(
 2390 						req->td_data->status,
 2391 						UDC_DMA_IN_STS_BS_HOST_READY,
 2392 						UDC_DMA_IN_STS_BS);
 2393 
 2394 					/* set poll demand bit */
 2395 					tmp = readl(&ep->regs->ctl);
 2396 					tmp |= AMD_BIT(UDC_EPCTL_P);
 2397 					writel(tmp, &ep->regs->ctl);
 2398 				}
 2399 			}
 2400 
 2401 		} else if (!use_dma && ep->in) {
 2402 			/* disable interrupt */
 2403 			tmp = readl(
 2404 				&dev->regs->ep_irqmsk);
 2405 			tmp |= AMD_BIT(ep->num);
 2406 			writel(tmp,
 2407 				&dev->regs->ep_irqmsk);
 2408 		}
 2409 	}
 2410 	/* clear status bits */
 2411 	writel(epsts, &ep->regs->sts);
 2412 
 2413 finished:
 2414 	return ret_val;
 2415 
 2416 }
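/*
 * IN traffic completes on two distinct events above: TDC (transfer
 * done, DMA mode) and the IN token status bit, which in FIFO mode
 * triggers a FIFO write and in DMA mode starts the next transfer by
 * writing the descriptor pointer and the poll demand bit P.
 */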
 2417 
 2418 /* Interrupt handler for Control OUT traffic */
 2419 static irqreturn_t udc_control_out_isr(struct udc *dev)
 2420 __releases(dev->lock)
 2421 __acquires(dev->lock)
 2422 {
 2423 	irqreturn_t ret_val = IRQ_NONE;
 2424 	u32 tmp;
 2425 	int setup_supported;
 2426 	u32 count;
 2427 	int set = 0;
 2428 	struct udc_ep	*ep;
 2429 	struct udc_ep	*ep_tmp;
 2430 
 2431 	ep = &dev->ep[UDC_EP0OUT_IX];
 2432 
 2433 	/* clear irq */
 2434 	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
 2435 
 2436 	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
 2437 	/* check BNA and clear if set */
 2438 	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
 2439 		VDBG(dev, "ep0: BNA set\n");
 2440 		writel(AMD_BIT(UDC_EPSTS_BNA),
 2441 			&dev->ep[UDC_EP0OUT_IX].regs->sts);
 2442 		ep->bna_occurred = 1;
 2443 		ret_val = IRQ_HANDLED;
 2444 		goto finished;
 2445 	}
 2446 
 2447 	/* type of data: SETUP or zero-length DATA */
 2448 	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
 2449 	VDBG(dev, "data_typ = %x\n", tmp);
 2450 
 2451 	/* setup data */
 2452 	if (tmp == UDC_EPSTS_OUT_SETUP) {
 2453 		ret_val = IRQ_HANDLED;
 2454 
 2455 		ep->dev->stall_ep0in = 0;
 2456 		dev->waiting_zlp_ack_ep0in = 0;
 2457 
 2458 		/* set NAK for EP0_IN */
 2459 		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 2460 		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
 2461 		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 2462 		dev->ep[UDC_EP0IN_IX].naking = 1;
 2463 		/* get setup data */
 2464 		if (use_dma) {
 2465 
 2466 			/* clear OUT bits in ep status */
 2467 			writel(UDC_EPSTS_OUT_CLEAR,
 2468 				&dev->ep[UDC_EP0OUT_IX].regs->sts);
 2469 
 2470 			setup_data.data[0] =
 2471 				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
 2472 			setup_data.data[1] =
 2473 				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
 2474 			/* set HOST READY */
 2475 			dev->ep[UDC_EP0OUT_IX].td_stp->status =
 2476 					UDC_DMA_STP_STS_BS_HOST_READY;
 2477 		} else {
 2478 			/* read fifo */
 2479 			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
 2480 		}
 2481 
 2482 		/* determine direction of control data */
 2483 		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
 2484 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
 2485 			/* enable RDE */
 2486 			udc_ep0_set_rde(dev);
 2487 			set = 0;
 2488 		} else {
 2489 			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
 2490 			/*
 2491 			 * implant BNA dummy descriptor to allow RXFIFO opening
 2492 			 * by RDE
 2493 			 */
 2494 			if (ep->bna_dummy_req) {
 2495 				/* write desc pointer */
 2496 				writel(ep->bna_dummy_req->td_phys,
 2497 					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
 2498 				ep->bna_occurred = 0;
 2499 			}
 2500 
 2501 			set = 1;
 2502 			dev->ep[UDC_EP0OUT_IX].naking = 1;
 2503 			/*
 2504 			 * set up timer for enabling RDE (so as not to enable
 2505 			 * RXFIFO DMA for data too early)
 2506 			 */
 2507 			set_rde = 1;
 2508 			if (!timer_pending(&udc_timer)) {
 2509 				udc_timer.expires = jiffies +
 2510 							HZ/UDC_RDE_TIMER_DIV;
 2511 				if (!stop_timer)
 2512 					add_timer(&udc_timer);
 2513 			}
 2514 		}
 2515 
 2516 		/*
 2517 		 * mass storage reset must be processed here because
 2518 		 * next packet may be a CLEAR_FEATURE HALT which would not
 2519 		 * clear the stall bit when no STALL handshake was received
 2520 		 * before (autostall can cause this)
 2521 		 */
 2522 		if (setup_data.data[0] == UDC_MSCRES_DWORD0
 2523 				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
 2524 			DBG(dev, "MSC Reset\n");
 2525 			/*
 2526 			 * clear stall bits
 2527 			 * only one IN and one OUT endpoint are handled
 2528 			 */
 2529 			ep_tmp = &udc->ep[UDC_EPIN_IX];
 2530 			udc_set_halt(&ep_tmp->ep, 0);
 2531 			ep_tmp = &udc->ep[UDC_EPOUT_IX];
 2532 			udc_set_halt(&ep_tmp->ep, 0);
 2533 		}
 2534 
 2535 		/* call gadget with setup data received */
 2536 		spin_unlock(&dev->lock);
 2537 		setup_supported = dev->driver->setup(&dev->gadget,
 2538 						&setup_data.request);
 2539 		spin_lock(&dev->lock);
 2540 
 2541 		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 2542 		/* ep0 in returns data (not zlp) on IN phase */
 2543 		if (setup_supported >= 0 && setup_supported <
 2544 				UDC_EP0IN_MAXPACKET) {
 2545 			/* clear NAK by writing CNAK in EP0_IN */
 2546 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 2547 			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 2548 			dev->ep[UDC_EP0IN_IX].naking = 0;
 2549 			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
 2550 
 2551 		/* if unsupported request then stall */
 2552 		} else if (setup_supported < 0) {
 2553 			tmp |= AMD_BIT(UDC_EPCTL_S);
 2554 			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 2555 		} else
 2556 			dev->waiting_zlp_ack_ep0in = 1;
 2557 
 2558 
 2559 		/* clear NAK by writing CNAK in EP0_OUT */
 2560 		if (!set) {
 2561 			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
 2562 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 2563 			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
 2564 			dev->ep[UDC_EP0OUT_IX].naking = 0;
 2565 			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
 2566 		}
 2567 
 2568 		if (!use_dma) {
 2569 			/* clear OUT bits in ep status */
 2570 			writel(UDC_EPSTS_OUT_CLEAR,
 2571 				&dev->ep[UDC_EP0OUT_IX].regs->sts);
 2572 		}
 2573 
 2574 	/* zero-length data packet */
 2575 	} else if (tmp == UDC_EPSTS_OUT_DATA) {
 2576 		/* clear OUT bits in ep status */
 2577 		writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
 2578 
 2579 		/* get data: only a zero-length packet is expected */
 2580 		if (use_dma) {
 2581 			/* no req if 0 packet, just reactivate */
 2582 			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
 2583 				VDBG(dev, "ZLP\n");
 2584 
 2585 				/* set HOST READY */
 2586 				dev->ep[UDC_EP0OUT_IX].td->status =
 2587 					AMD_ADDBITS(
 2588 					dev->ep[UDC_EP0OUT_IX].td->status,
 2589 					UDC_DMA_OUT_STS_BS_HOST_READY,
 2590 					UDC_DMA_OUT_STS_BS);
 2591 				/* enable RDE */
 2592 				udc_ep0_set_rde(dev);
 2593 				ret_val = IRQ_HANDLED;
 2594 
 2595 			} else {
 2596 				/* control write */
 2597 				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
 2598 				/* re-program desc. pointer for possible ZLPs */
 2599 				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
 2600 					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
 2601 				/* enable RDE */
 2602 				udc_ep0_set_rde(dev);
 2603 			}
 2604 		} else {
 2605 
 2606 			/* number of received bytes */
 2607 			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
 2608 			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
 2609 			/* out data for fifo mode not working */
 2610 			count = 0;
 2611 
 2612 			/* 0 packet or real data ? */
 2613 			if (count != 0) {
 2614 				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
 2615 			} else {
 2616 				/* dummy read confirm */
 2617 				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
 2618 				ret_val = IRQ_HANDLED;
 2619 			}
 2620 		}
 2621 	}
 2622 
 2623 	/* check pending CNAKS */
 2624 	if (cnak_pending) {
 2625 		/* CNAK processing only when the rxfifo is empty */
 2626 		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
 2627 			udc_process_cnak_queue(dev);
 2628 	}
 2629 
 2630 finished:
 2631 	return ret_val;
 2632 }
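/*
 * Return-value convention of the gadget's setup() callback as used
 * above: a value >= 0 and below UDC_EP0IN_MAXPACKET means data will
 * be returned in the IN phase (EP0_IN is un-NAKed); a negative value
 * means the request is unsupported (EP0_IN is stalled); anything else
 * makes the driver wait for a zero-length-packet ack.
 */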
 2633 
 2634 /* Interrupt handler for Control IN traffic */
 2635 static irqreturn_t udc_control_in_isr(struct udc *dev)
 2636 {
 2637 	irqreturn_t ret_val = IRQ_NONE;
 2638 	u32 tmp;
 2639 	struct udc_ep *ep;
 2640 	struct udc_request *req;
 2641 	unsigned len;
 2642 
 2643 	ep = &dev->ep[UDC_EP0IN_IX];
 2644 
 2645 	/* clear irq */
 2646 	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
 2647 
 2648 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
 2649 	/* DMA completion */
 2650 	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
 2651 		VDBG(dev, "isr: TDC clear\n");
 2652 		ret_val = IRQ_HANDLED;
 2653 
 2654 		/* clear TDC bit */
 2655 		writel(AMD_BIT(UDC_EPSTS_TDC),
 2656 				&dev->ep[UDC_EP0IN_IX].regs->sts);
 2657 
 2658 	/* status reg has IN bit set ? */
 2659 	} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
 2660 		ret_val = IRQ_HANDLED;
 2661 
 2662 		if (ep->dma) {
 2663 			/* clear IN bit */
 2664 			writel(AMD_BIT(UDC_EPSTS_IN),
 2665 				&dev->ep[UDC_EP0IN_IX].regs->sts);
 2666 		}
 2667 		if (dev->stall_ep0in) {
 2668 			DBG(dev, "stall ep0in\n");
 2669 			/* halt ep0in */
 2670 			tmp = readl(&ep->regs->ctl);
 2671 			tmp |= AMD_BIT(UDC_EPCTL_S);
 2672 			writel(tmp, &ep->regs->ctl);
 2673 		} else {
 2674 			if (!list_empty(&ep->queue)) {
 2675 				/* next request */
 2676 				req = list_entry(ep->queue.next,
 2677 						struct udc_request, queue);
 2678 
 2679 				if (ep->dma) {
 2680 					/* write desc pointer */
 2681 					writel(req->td_phys, &ep->regs->desptr);
 2682 					/* set HOST READY */
 2683 					req->td_data->status =
 2684 						AMD_ADDBITS(
 2685 						req->td_data->status,
 2686 						UDC_DMA_STP_STS_BS_HOST_READY,
 2687 						UDC_DMA_STP_STS_BS);
 2688 
 2689 					/* set poll demand bit */
 2690 					tmp =
 2691 					readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 2692 					tmp |= AMD_BIT(UDC_EPCTL_P);
 2693 					writel(tmp,
 2694 					&dev->ep[UDC_EP0IN_IX].regs->ctl);
 2695 
 2696 					/* all bytes will be transferred */
 2697 					req->req.actual = req->req.length;
 2698 
 2699 					/* complete req */
 2700 					complete_req(ep, req, 0);
 2701 
 2702 				} else {
 2703 					/* write fifo */
 2704 					udc_txfifo_write(ep, &req->req);
 2705 
 2706 					/* length bytes transferred */
 2707 					len = req->req.length - req->req.actual;
 2708 					if (len > ep->ep.maxpacket)
 2709 						len = ep->ep.maxpacket;
 2710 
 2711 					req->req.actual += len;
 2712 					if (req->req.actual == req->req.length
 2713 						|| (len != ep->ep.maxpacket)) {
 2714 						/* complete req */
 2715 						complete_req(ep, req, 0);
 2716 					}
 2717 				}
 2718 
 2719 			}
 2720 		}
 2721 		ep->halted = 0;
 2722 		dev->stall_ep0in = 0;
 2723 		if (!ep->dma) {
 2724 			/* clear IN bit */
 2725 			writel(AMD_BIT(UDC_EPSTS_IN),
 2726 				&dev->ep[UDC_EP0IN_IX].regs->sts);
 2727 		}
 2728 	}
 2729 
 2730 	return ret_val;
 2731 }
 2732 
 2733 
 2734 /* Interrupt handler for global device events */
 2735 static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
 2736 __releases(dev->lock)
 2737 __acquires(dev->lock)
 2738 {
 2739 	irqreturn_t ret_val = IRQ_NONE;
 2740 	u32 tmp;
 2741 	u32 cfg;
 2742 	struct udc_ep *ep;
 2743 	u16 i;
 2744 	u8 udc_csr_epix;
 2745 
 2746 	/* SET_CONFIG irq ? */
 2747 	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
 2748 		ret_val = IRQ_HANDLED;
 2749 
 2750 		/* read config value */
 2751 		tmp = readl(&dev->regs->sts);
 2752 		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
 2753 		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
 2754 		dev->cur_config = cfg;
 2755 		dev->set_cfg_not_acked = 1;
 2756 
 2757 		/* make usb request for gadget driver */
 2758 		memset(&setup_data, 0, sizeof(union udc_setup_data));
 2759 		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
 2760 		setup_data.request.wValue = cpu_to_le16(dev->cur_config);
 2761 
 2762 		/* program the NE registers */
 2763 		for (i = 0; i < UDC_EP_NUM; i++) {
 2764 			ep = &dev->ep[i];
 2765 			if (ep->in) {
 2766 
 2767 				/* ep ix in UDC CSR register space */
 2768 				udc_csr_epix = ep->num;
 2769 
 2770 
 2771 			/* OUT ep */
 2772 			} else {
 2773 				/* ep ix in UDC CSR register space */
 2774 				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
 2775 			}
 2776 
 2777 			tmp = readl(&dev->csr->ne[udc_csr_epix]);
 2778 			/* ep cfg */
 2779 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
 2780 						UDC_CSR_NE_CFG);
 2781 			/* write reg */
 2782 			writel(tmp, &dev->csr->ne[udc_csr_epix]);
 2783 
 2784 			/* clear stall bits */
 2785 			ep->halted = 0;
 2786 			tmp = readl(&ep->regs->ctl);
 2787 			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
 2788 			writel(tmp, &ep->regs->ctl);
 2789 		}
 2790 		/* call gadget zero with setup data received */
 2791 		spin_unlock(&dev->lock);
 2792 		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
 2793 		spin_lock(&dev->lock);
 2794 
 2795 	} /* SET_INTERFACE ? */
 2796 	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
 2797 		ret_val = IRQ_HANDLED;
 2798 
 2799 		dev->set_cfg_not_acked = 1;
 2800 		/* read interface and alt setting values */
 2801 		tmp = readl(&dev->regs->sts);
 2802 		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
 2803 		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
 2804 
 2805 		/* make usb request for gadget driver */
 2806 		memset(&setup_data, 0, sizeof(union udc_setup_data));
 2807 		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
 2808 		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
 2809 		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
 2810 		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
 2811 
 2812 		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
 2813 				dev->cur_alt, dev->cur_intf);
 2814 
 2815 		/* program the NE registers */
 2816 		for (i = 0; i < UDC_EP_NUM; i++) {
 2817 			ep = &dev->ep[i];
 2818 			if (ep->in) {
 2819 
 2820 				/* ep ix in UDC CSR register space */
 2821 				udc_csr_epix = ep->num;
 2822 
 2823 
 2824 			/* OUT ep */
 2825 			} else {
 2826 				/* ep ix in UDC CSR register space */
 2827 				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
 2828 			}
 2829 
 2830 			/* UDC CSR reg */
 2831 			/* set ep values */
 2832 			tmp = readl(&dev->csr->ne[udc_csr_epix]);
 2833 			/* ep interface */
 2834 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
 2835 						UDC_CSR_NE_INTF);
 2836 			/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
 2837 			/* ep alt */
 2838 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
 2839 						UDC_CSR_NE_ALT);
 2840 			/* write reg */
 2841 			writel(tmp, &dev->csr->ne[udc_csr_epix]);
 2842 
 2843 			/* clear stall bits */
 2844 			ep->halted = 0;
 2845 			tmp = readl(&ep->regs->ctl);
 2846 			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
 2847 			writel(tmp, &ep->regs->ctl);
 2848 		}
 2849 
 2850 		/* call gadget zero with setup data received */
 2851 		spin_unlock(&dev->lock);
 2852 		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
 2853 		spin_lock(&dev->lock);
 2854 
 2855 	} /* USB reset */
 2856 	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
 2857 		DBG(dev, "USB Reset interrupt\n");
 2858 		ret_val = IRQ_HANDLED;
 2859 
 2860 		/* allow soft reset when suspend occurs */
 2861 		soft_reset_occured = 0;
 2862 
 2863 		dev->waiting_zlp_ack_ep0in = 0;
 2864 		dev->set_cfg_not_acked = 0;
 2865 
 2866 		/* mask not needed interrupts */
 2867 		udc_mask_unused_interrupts(dev);
 2868 
 2869 		/* call gadget to resume and reset configs etc. */
 2870 		spin_unlock(&dev->lock);
 2871 		if (dev->sys_suspended && dev->driver->resume) {
 2872 			dev->driver->resume(&dev->gadget);
 2873 			dev->sys_suspended = 0;
 2874 		}
 2875 		usb_gadget_udc_reset(&dev->gadget, dev->driver);
 2876 		spin_lock(&dev->lock);
 2877 
 2878 		/* disable ep0 to empty req queue */
 2879 		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
 2880 		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
 2881 
 2882 		/* soft reset when rxfifo not empty */
 2883 		tmp = readl(&dev->regs->sts);
 2884 		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
 2885 				&& !soft_reset_after_usbreset_occured) {
 2886 			udc_soft_reset(dev);
 2887 			soft_reset_after_usbreset_occured++;
 2888 		}
 2889 
 2890 		/*
 2891 		 * DMA reset to kill potential old DMA hw hang,
 2892 		 * POLL bit is already reset by ep_init() through
 2893 		 * disconnect()
 2894 		 */
 2895 		DBG(dev, "DMA machine reset\n");
 2896 		tmp = readl(&dev->regs->cfg);
 2897 		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
 2898 		writel(tmp, &dev->regs->cfg);
 2899 
 2900 		/* put into initial config */
 2901 		udc_basic_init(dev);
 2902 
 2903 		/* enable device setup interrupts */
 2904 		udc_enable_dev_setup_interrupts(dev);
 2905 
 2906 		/* enable suspend interrupt */
 2907 		tmp = readl(&dev->regs->irqmsk);
 2908 		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
 2909 		writel(tmp, &dev->regs->irqmsk);
 2910 
 2911 	} /* USB suspend */
 2912 	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
 2913 		DBG(dev, "USB Suspend interrupt\n");
 2914 		ret_val = IRQ_HANDLED;
 2915 		if (dev->driver->suspend) {
 2916 			spin_unlock(&dev->lock);
 2917 			dev->sys_suspended = 1;
 2918 			dev->driver->suspend(&dev->gadget);
 2919 			spin_lock(&dev->lock);
 2920 		}
 2921 	} /* new speed ? */
 2922 	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
 2923 		DBG(dev, "ENUM interrupt\n");
 2924 		ret_val = IRQ_HANDLED;
 2925 		soft_reset_after_usbreset_occured = 0;
 2926 
 2927 		/* disable ep0 to empty req queue */
 2928 		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
 2929 		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
 2930 
 2931 		/* link up all endpoints */
 2932 		udc_setup_endpoints(dev);
 2933 		dev_info(&dev->pdev->dev, "Connect: %s\n",
 2934 			 usb_speed_string(dev->gadget.speed));
 2935 
 2936 		/* init ep 0 */
 2937 		activate_control_endpoints(dev);
 2938 
 2939 		/* enable ep0 interrupts */
 2940 		udc_enable_ep0_interrupts(dev);
 2941 	}
 2942 	/* session valid change interrupt */
 2943 	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
 2944 		DBG(dev, "USB SVC interrupt\n");
 2945 		ret_val = IRQ_HANDLED;
 2946 
 2947 		/* a session that is no longer valid indicates disconnect */
 2948 		tmp = readl(&dev->regs->sts);
 2949 		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
 2950 			/* disable suspend interrupt */
 2951 			tmp = readl(&dev->regs->irqmsk);
 2952 			tmp |= AMD_BIT(UDC_DEVINT_US);
 2953 			writel(tmp, &dev->regs->irqmsk);
 2954 			DBG(dev, "USB Disconnect (session valid low)\n");
 2955 			/* cleanup on disconnect */
 2956 			usb_disconnect(udc);
 2957 		}
 2958 
 2959 	}
 2960 
 2961 	return ret_val;
 2962 }
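/*
 * Device-level interrupt causes handled above: SC (SET_CONFIG),
 * SI (SET_INTERFACE), UR (USB reset), US (suspend), ENUM (speed
 * enumeration done) and SVC (session valid change, used to detect
 * disconnect). SC and SI are acknowledged by synthesizing the
 * corresponding control request and passing it to the gadget driver.
 */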
 2963 
 2964 /* Interrupt Service Routine, see Linux Kernel Doc for parameters */
 2965 static irqreturn_t udc_irq(int irq, void *pdev)
 2966 {
 2967 	struct udc *dev = pdev;
 2968 	u32 reg;
 2969 	u16 i;
 2970 	u32 ep_irq;
 2971 	irqreturn_t ret_val = IRQ_NONE;
 2972 
 2973 	spin_lock(&dev->lock);
 2974 
 2975 	/* check for ep irq */
 2976 	reg = readl(&dev->regs->ep_irqsts);
 2977 	if (reg) {
 2978 		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
 2979 			ret_val |= udc_control_out_isr(dev);
 2980 		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
 2981 			ret_val |= udc_control_in_isr(dev);
 2982 
 2983 		/*
 2984 		 * data endpoint
 2985 		 * iterate ep's
 2986 		 */
 2987 		for (i = 1; i < UDC_EP_NUM; i++) {
 2988 			ep_irq = 1 << i;
 2989 			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
 2990 				continue;
 2991 
 2992 			/* clear irq status */
 2993 			writel(ep_irq, &dev->regs->ep_irqsts);
 2994 
 2995 			/* irq for out ep ? */
 2996 			if (i > UDC_EPIN_NUM)
 2997 				ret_val |= udc_data_out_isr(dev, i);
 2998 			else
 2999 				ret_val |= udc_data_in_isr(dev, i);
 3000 		}
 3001 
 3002 	}
 3003 
 3004 
 3005 	/* check for dev irq */
 3006 	reg = readl(&dev->regs->irqsts);
 3007 	if (reg) {
 3008 		/* clear irq */
 3009 		writel(reg, &dev->regs->irqsts);
 3010 		ret_val |= udc_dev_isr(dev, reg);
 3011 	}
 3012 
 3013 
 3014 	spin_unlock(&dev->lock);
 3015 	return ret_val;
 3016 }
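/*
 * Dispatch logic above: ep0 IN/OUT interrupts get dedicated handlers;
 * for the remaining endpoint bits the index decides the direction,
 * with i > UDC_EPIN_NUM treated as an OUT endpoint. This presumably
 * mirrors the controller's interrupt register layout, with IN
 * endpoints in the low bits and OUT endpoints in the high bits.
 */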
 3017 
 3018 /* Tears down device */
 3019 static void gadget_release(struct device *pdev)
 3020 {
 3021 	struct amd5536udc *dev = dev_get_drvdata(pdev);
 3022 	kfree(dev);
 3023 }
 3024 
 3025 /* Cleanup on device remove */
 3026 static void udc_remove(struct udc *dev)
 3027 {
 3028 	/* remove timer */
 3029 	stop_timer++;
 3030 	if (timer_pending(&udc_timer))
 3031 		wait_for_completion(&on_exit);
 3032 	if (udc_timer.data)
 3033 		del_timer_sync(&udc_timer);
 3034 	/* remove pollstall timer */
 3035 	stop_pollstall_timer++;
 3036 	if (timer_pending(&udc_pollstall_timer))
 3037 		wait_for_completion(&on_pollstall_exit);
 3038 	if (udc_pollstall_timer.data)
 3039 		del_timer_sync(&udc_pollstall_timer);
 3040 	udc = NULL;
 3041 }
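/*
 * The timers' .data field doubles as an "initialized" flag: it is
 * zeroed at the start of udc_probe() and set to 1 only after
 * init_timer(), so del_timer_sync() is never called on a timer that
 * was not set up.
 */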
 3042 
 3043 /* Reset all pci context */
 3044 static void udc_pci_remove(struct pci_dev *pdev)
 3045 {
 3046 	struct udc		*dev;
 3047 
 3048 	dev = pci_get_drvdata(pdev);
 3049 
 3050 	usb_del_gadget_udc(&udc->gadget);
 3051 	/* gadget driver must not be registered */
 3052 	BUG_ON(dev->driver != NULL);
 3053 
 3054 	/* dma pool cleanup */
 3055 	if (dev->data_requests)
 3056 		pci_pool_destroy(dev->data_requests);
 3057 
 3058 	if (dev->stp_requests) {
 3059 		/* cleanup DMA desc's for ep0in */
 3060 		pci_pool_free(dev->stp_requests,
 3061 			dev->ep[UDC_EP0OUT_IX].td_stp,
 3062 			dev->ep[UDC_EP0OUT_IX].td_stp_dma);
 3063 		pci_pool_free(dev->stp_requests,
 3064 			dev->ep[UDC_EP0OUT_IX].td,
 3065 			dev->ep[UDC_EP0OUT_IX].td_phys);
 3066 
 3067 		pci_pool_destroy(dev->stp_requests);
 3068 	}
 3069 
 3070 	/* reset controller */
 3071 	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
 3072 	if (dev->irq_registered)
 3073 		free_irq(pdev->irq, dev);
 3074 	if (dev->regs)
 3075 		iounmap(dev->regs);
 3076 	if (dev->mem_region)
 3077 		release_mem_region(pci_resource_start(pdev, 0),
 3078 				pci_resource_len(pdev, 0));
 3079 	if (dev->active)
 3080 		pci_disable_device(pdev);
 3081 
 3082 	udc_remove(dev);
 3083 }
 3084 
 3085 /* create dma pools on init */
 3086 static int init_dma_pools(struct udc *dev)
 3087 {
 3088 	struct udc_stp_dma	*td_stp;
 3089 	struct udc_data_dma	*td_data;
 3090 	int retval;
 3091 
 3092 	/* consistent DMA mode setting ? */
 3093 	if (use_dma_ppb) {
 3094 		use_dma_bufferfill_mode = 0;
 3095 	} else {
 3096 		use_dma_ppb_du = 0;
 3097 		use_dma_bufferfill_mode = 1;
 3098 	}
 3099 
 3100 	/* DMA setup */
 3101 	dev->data_requests = dma_pool_create("data_requests", NULL,
 3102 		sizeof(struct udc_data_dma), 0, 0);
 3103 	if (!dev->data_requests) {
 3104 		DBG(dev, "can't get request data pool\n");
 3105 		retval = -ENOMEM;
 3106 		goto finished;
 3107 	}
 3108 
 3109 	/* EP0 in dma regs = dev control regs */
 3110 	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
 3111 
 3112 	/* dma desc for setup data */
 3113 	dev->stp_requests = dma_pool_create("setup requests", NULL,
 3114 		sizeof(struct udc_stp_dma), 0, 0);
 3115 	if (!dev->stp_requests) {
 3116 		DBG(dev, "can't get stp request pool\n");
 3117 		retval = -ENOMEM;
 3118 		goto finished;
 3119 	}
 3120 	/* setup */
 3121 	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
 3122 				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
 3123 	if (td_stp == NULL) {
 3124 		retval = -ENOMEM;
 3125 		goto finished;
 3126 	}
 3127 	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
 3128 
 3129 	/* data: 0 packets !? */
 3130 	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
 3131 				&dev->ep[UDC_EP0OUT_IX].td_phys);
 3132 	if (td_data == NULL) {
 3133 		retval = -ENOMEM;
 3134 		goto finished;
 3135 	}
 3136 	dev->ep[UDC_EP0OUT_IX].td = td_data;
 3137 	return 0;
 3138 
 3139 finished:
 3140 	return retval;
 3141 }
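/*
 * Note that both the setup descriptor (td_stp) and the data
 * descriptor (td) for ep0out are allocated from the same stp_requests
 * pool; udc_pci_remove() frees them in the same pairing before
 * destroying the pool.
 */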
 3142 
 3143 /* Called by pci bus driver to init pci context */
 3144 static int udc_pci_probe(
 3145 	struct pci_dev *pdev,
 3146 	const struct pci_device_id *id
 3147 )
 3148 {
 3149 	struct udc		*dev;
 3150 	unsigned long		resource;
 3151 	unsigned long		len;
 3152 	int			retval = 0;
 3153 
 3154 	/* one udc only */
 3155 	if (udc) {
 3156 		dev_dbg(&pdev->dev, "already probed\n");
 3157 		return -EBUSY;
 3158 	}
 3159 
 3160 	/* init */
 3161 	dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
 3162 	if (!dev) {
 3163 		retval = -ENOMEM;
 3164 		goto finished;
 3165 	}
 3166 
 3167 	/* pci setup */
 3168 	if (pci_enable_device(pdev) < 0) {
 3169 		kfree(dev);
 3170 		dev = NULL;
 3171 		retval = -ENODEV;
 3172 		goto finished;
 3173 	}
 3174 	dev->active = 1;
 3175 
 3176 	/* PCI resource allocation */
 3177 	resource = pci_resource_start(pdev, 0);
 3178 	len = pci_resource_len(pdev, 0);
 3179 
 3180 	if (!request_mem_region(resource, len, name)) {
 3181 		dev_dbg(&pdev->dev, "pci device already used\n");
 3182 		kfree(dev);
 3183 		dev = NULL;
 3184 		retval = -EBUSY;
 3185 		goto finished;
 3186 	}
 3187 	dev->mem_region = 1;
 3188 
 3189 	dev->virt_addr = ioremap_nocache(resource, len);
 3190 	if (dev->virt_addr == NULL) {
 3191 		dev_dbg(&pdev->dev, "start address cannot be mapped\n");
 3192 		kfree(dev);
 3193 		dev = NULL;
 3194 		retval = -EFAULT;
 3195 		goto finished;
 3196 	}
 3197 
 3198 	if (!pdev->irq) {
 3199 		dev_err(&pdev->dev, "irq not set\n");
 3200 		kfree(dev);
 3201 		dev = NULL;
 3202 		retval = -ENODEV;
 3203 		goto finished;
 3204 	}
 3205 
 3206 	spin_lock_init(&dev->lock);
 3207 	/* udc csr registers base */
 3208 	dev->csr = dev->virt_addr + UDC_CSR_ADDR;
 3209 	/* dev registers base */
 3210 	dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
 3211 	/* ep registers base */
 3212 	dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
 3213 	/* fifo's base */
 3214 	dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
 3215 	dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
 3216 
 3217 	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
 3218 		dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
 3219 		kfree(dev);
 3220 		dev = NULL;
 3221 		retval = -EBUSY;
 3222 		goto finished;
 3223 	}
 3224 	dev->irq_registered = 1;
 3225 
 3226 	pci_set_drvdata(pdev, dev);
 3227 
 3228 	/* chip revision for HS AMD5536 */
 3229 	dev->chiprev = pdev->revision;
 3230 
 3231 	pci_set_master(pdev);
 3232 	pci_try_set_mwi(pdev);
 3233 
 3234 	/* init dma pools */
 3235 	if (use_dma) {
 3236 		retval = init_dma_pools(dev);
 3237 		if (retval != 0)
 3238 			goto finished;
 3239 	}
 3240 
 3241 	dev->phys_addr = resource;
 3242 	dev->irq = pdev->irq;
 3243 	dev->pdev = pdev;
 3244 
 3245 	/* general probing */
 3246 	if (udc_probe(dev) == 0)
 3247 		return 0;
 3248 
 3249 finished:
 3250 	if (dev)
 3251 		udc_pci_remove(pdev);
 3252 	return retval;
 3253 }
 3254 
 3255 /* general probe */
 3256 static int udc_probe(struct udc *dev)
 3257 {
 3258 	char		tmp[128];
 3259 	u32		reg;
 3260 	int		retval;
 3261 
 3262 	/* mark timer as not initialized */
 3263 	udc_timer.data = 0;
 3264 	udc_pollstall_timer.data = 0;
 3265 
 3266 	/* device struct setup */
 3267 	dev->gadget.ops = &udc_ops;
 3268 
 3269 	dev_set_name(&dev->gadget.dev, "gadget");
 3270 	dev->gadget.name = name;
 3271 	dev->gadget.max_speed = USB_SPEED_HIGH;
 3272 
 3273 	/* init registers, interrupts, ... */
 3274 	startup_registers(dev);
 3275 
 3276 	dev_info(&dev->pdev->dev, "%s\n", mod_desc);
 3277 
 3278 	snprintf(tmp, sizeof tmp, "%d", dev->irq);
 3279 	dev_info(&dev->pdev->dev,
 3280 		"irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
 3281 		tmp, dev->phys_addr, dev->chiprev,
 3282 		(dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
 3283 	strcpy(tmp, UDC_DRIVER_VERSION_STRING);
 3284 	if (dev->chiprev == UDC_HSA0_REV) {
 3285 		dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
 3286 		retval = -ENODEV;
 3287 		goto finished;
 3288 	}
 3289 	dev_info(&dev->pdev->dev,
 3290 		"driver version: %s(for Geode5536 B1)\n", tmp);
 3291 	udc = dev;
 3292 
 3293 	retval = usb_add_gadget_udc_release(&udc->pdev->dev, &dev->gadget,
 3294 			gadget_release);
 3295 	if (retval)
 3296 		goto finished;
 3297 
 3298 	/* timer init */
 3299 	init_timer(&udc_timer);
 3300 	udc_timer.function = udc_timer_function;
 3301 	udc_timer.data = 1;
 3302 	/* timer pollstall init */
 3303 	init_timer(&udc_pollstall_timer);
 3304 	udc_pollstall_timer.function = udc_pollstall_timer_function;
 3305 	udc_pollstall_timer.data = 1;
 3306 
 3307 	/* set SD */
 3308 	reg = readl(&dev->regs->ctl);
 3309 	reg |= AMD_BIT(UDC_DEVCTL_SD);
 3310 	writel(reg, &dev->regs->ctl);
 3311 
 3312 	/* print dev register info */
 3313 	print_regs(dev);
 3314 
 3315 	return 0;
 3316 
 3317 finished:
 3318 	return retval;
 3319 }
 3320 
 3321 /* Initiates a remote wakeup */
 3322 static int udc_remote_wakeup(struct udc *dev)
 3323 {
 3324 	unsigned long flags;
 3325 	u32 tmp;
 3326 
 3327 	DBG(dev, "UDC initiates remote wakeup\n");
 3328 
 3329 	spin_lock_irqsave(&dev->lock, flags);
 3330 
 3331 	tmp = readl(&dev->regs->ctl);
 3332 	tmp |= AMD_BIT(UDC_DEVCTL_RES);
 3333 	writel(tmp, &dev->regs->ctl);
 3334 	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
 3335 	writel(tmp, &dev->regs->ctl);
 3336 
 3337 	spin_unlock_irqrestore(&dev->lock, flags);
 3338 	return 0;
 3339 }
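/*
 * Remote wakeup is signalled by pulsing RES (presumably the resume
 * signalling bit of the device control register): the bit is set and
 * then cleared again under the device lock, leaving the controller to
 * drive resume signalling on the bus.
 */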
 3340 
 3341 /* PCI device parameters */
 3342 static const struct pci_device_id pci_id[] = {
 3343 	{
 3344 		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
 3345 		.class =	(PCI_CLASS_SERIAL_USB << 8) | 0xfe,
 3346 		.class_mask =	0xffffffff,
 3347 	},
 3348 	{},
 3349 };
 3350 MODULE_DEVICE_TABLE(pci, pci_id);
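/*
 * The single match entry binds to AMD device 0x2096 (the Geode CS5536
 * UDC) with class (PCI_CLASS_SERIAL_USB << 8) | 0xfe, i.e. a USB
 * controller whose programming interface marks it as operating in
 * device rather than host mode.
 */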
 3351 
 3352 /* PCI functions */
 3353 static struct pci_driver udc_pci_driver = {
 3354 	.name =		(char *) name,
 3355 	.id_table =	pci_id,
 3356 	.probe =	udc_pci_probe,
 3357 	.remove =	udc_pci_remove,
 3358 };
 3359 
 3360 module_pci_driver(udc_pci_driver);
 3361 
 3362 MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
 3363 MODULE_AUTHOR("Thomas Dahlmann");
 3364 MODULE_LICENSE("GPL");
 3365 
 3366 
 3367 
 3368 
 3369 
 3370 
 3371 /* LDV_COMMENT_BEGIN_MAIN */
 3372 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
 3373 
 3374 /*###########################################################################*/
 3375 
 3376 /*############## Driver Environment Generator 0.2 output ####################*/
 3377 
 3378 /*###########################################################################*/
 3379 
 3380 
 3381 
 3382 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
 3383 void ldv_check_final_state(void);
 3384 
 3385 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
 3386 void ldv_check_return_value(int res);
 3387 
 3388 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
 3389 void ldv_check_return_value_probe(int res);
 3390 
 3391 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 3392 void ldv_initialize(void);
 3393 
 3394 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 3395 void ldv_handler_precall(void);
 3396 
 3397 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary integer value. */
 3398 int nondet_int(void);
 3399 
 3400 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 3401 int LDV_IN_INTERRUPT;
 3402 
 3403 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
 3404 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
 3405 
 3406 
 3407 
 3408 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
 3409 	/*============================= VARIABLE DECLARATION PART   =============================*/
 3410 	/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3411 	/* content: static int udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)*/
 3412 	/* LDV_COMMENT_BEGIN_PREP */
 3413 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3414 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3415 	/* LDV_COMMENT_END_PREP */
 3416 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_ep_enable" */
 3417 	struct usb_ep * var_group1;
 3418 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_ep_enable" */
 3419 	const struct usb_endpoint_descriptor * var_udc_ep_enable_6_p1;
 3420 	/* LDV_COMMENT_BEGIN_PREP */
 3421 	#ifdef UDC_VERBOSE
 3422 	#endif
 3423 	/* LDV_COMMENT_END_PREP */
 3424 	/* content: static int udc_ep_disable(struct usb_ep *usbep)*/
 3425 	/* LDV_COMMENT_BEGIN_PREP */
 3426 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3427 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3428 	/* LDV_COMMENT_END_PREP */
 3429 	/* LDV_COMMENT_BEGIN_PREP */
 3430 	#ifdef UDC_VERBOSE
 3431 	#endif
 3432 	/* LDV_COMMENT_END_PREP */
 3433 	/* content: static struct usb_request * udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)*/
 3434 	/* LDV_COMMENT_BEGIN_PREP */
 3435 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3436 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3437 	/* LDV_COMMENT_END_PREP */
 3438 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_alloc_request" */
 3439 	gfp_t  var_udc_alloc_request_9_p1;
 3440 	/* LDV_COMMENT_BEGIN_PREP */
 3441 	#ifdef UDC_VERBOSE
 3442 	#endif
 3443 	/* LDV_COMMENT_END_PREP */
 3444 	/* content: static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)*/
 3445 	/* LDV_COMMENT_BEGIN_PREP */
 3446 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3447 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3448 	/* LDV_COMMENT_END_PREP */
 3449 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_free_request" */
 3450 	struct usb_request * var_group2;
 3451 	/* LDV_COMMENT_BEGIN_PREP */
 3452 	#ifdef UDC_VERBOSE
 3453 	#endif
 3454 	/* LDV_COMMENT_END_PREP */
 3455 	/* content: static int udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)*/
 3456 	/* LDV_COMMENT_BEGIN_PREP */
 3457 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3458 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3459 	#ifdef UDC_VERBOSE
 3460 	#endif
 3461 	/* LDV_COMMENT_END_PREP */
 3462 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_queue" */
 3463 	gfp_t  var_udc_queue_23_p2;
 3464 	/* content: static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)*/
 3465 	/* LDV_COMMENT_BEGIN_PREP */
 3466 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3467 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3468 	#ifdef UDC_VERBOSE
 3469 	#endif
 3470 	/* LDV_COMMENT_END_PREP */
 3471 	/* content: static int udc_set_halt(struct usb_ep *usbep, int halt)*/
 3472 	/* LDV_COMMENT_BEGIN_PREP */
 3473 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3474 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3475 	#ifdef UDC_VERBOSE
 3476 	#endif
 3477 	/* LDV_COMMENT_END_PREP */
 3478 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_set_halt" */
 3479 	int  var_udc_set_halt_26_p1;
 3480 
 3481 	/** STRUCT: struct type: usb_gadget_ops, struct name: udc_ops **/
 3482 	/* content: static int udc_wakeup(struct usb_gadget *gadget)*/
 3483 	/* LDV_COMMENT_BEGIN_PREP */
 3484 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3485 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3486 	#ifdef UDC_VERBOSE
 3487 	#endif
 3488 	/* LDV_COMMENT_END_PREP */
 3489 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_wakeup" */
 3490 	struct usb_gadget * var_group3;
 3491 	/* content: static int udc_get_frame(struct usb_gadget *gadget)*/
 3492 	/* LDV_COMMENT_BEGIN_PREP */
 3493 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3494 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3495 	#ifdef UDC_VERBOSE
 3496 	#endif
 3497 	/* LDV_COMMENT_END_PREP */
 3498 	/* content: static int amd5536_udc_start(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
 3499 	/* LDV_COMMENT_BEGIN_PREP */
 3500 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3501 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3502 	#ifdef UDC_VERBOSE
 3503 	#endif
 3504 	/* LDV_COMMENT_END_PREP */
 3505 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "amd5536_udc_start" */
 3506 	struct usb_gadget_driver * var_group4;
 3507 	/* content: static int amd5536_udc_stop(struct usb_gadget *g)*/
 3508 	/* LDV_COMMENT_BEGIN_PREP */
 3509 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3510 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3511 	#ifdef UDC_VERBOSE
 3512 	#endif
 3513 	/* LDV_COMMENT_END_PREP */
 3514 
 3515 	/** STRUCT: struct type: pci_driver, struct name: udc_pci_driver **/
 3516 	/* content: static int udc_pci_probe( struct pci_dev *pdev, const struct pci_device_id *id )*/
 3517 	/* LDV_COMMENT_BEGIN_PREP */
 3518 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3519 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3520 	#ifdef UDC_VERBOSE
 3521 	#endif
 3522 	/* LDV_COMMENT_END_PREP */
 3523 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_pci_probe" */
 3524 	struct pci_dev * var_group5;
 3525 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_pci_probe" */
 3526 	const struct pci_device_id * var_udc_pci_probe_54_p1;
 3527 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "udc_pci_probe" */
 3528 	static int res_udc_pci_probe_54;
 3529 	/* content: static void udc_pci_remove(struct pci_dev *pdev)*/
 3530 	/* LDV_COMMENT_BEGIN_PREP */
 3531 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3532 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3533 	#ifdef UDC_VERBOSE
 3534 	#endif
 3535 	/* LDV_COMMENT_END_PREP */
 3536 
 3537 	/** CALLBACK SECTION request_irq **/
 3538 	/* content: static irqreturn_t udc_irq(int irq, void *pdev)*/
 3539 	/* LDV_COMMENT_BEGIN_PREP */
 3540 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3541 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3542 	#ifdef UDC_VERBOSE
 3543 	#endif
 3544 	/* LDV_COMMENT_END_PREP */
 3545 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_irq" */
 3546 	int  var_udc_irq_49_p0;
 3547 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_irq" */
 3548 	void * var_udc_irq_49_p1;
 3549 
 3550 	/** TIMER SECTION timer **/
 3551 	/* content: static void udc_timer_function(unsigned long v)*/
 3552 	/* LDV_COMMENT_BEGIN_PREP */
 3553 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3554 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3555 	#ifdef UDC_VERBOSE
 3556 	#endif
 3557 	/* LDV_COMMENT_END_PREP */
 3558 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_timer_function" */
 3559 	unsigned long  var_udc_timer_function_37_p0;
 3560 	/* content: static void udc_pollstall_timer_function(unsigned long v)*/
 3561 	/* LDV_COMMENT_BEGIN_PREP */
 3562 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3563 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3564 	#ifdef UDC_VERBOSE
 3565 	#endif
 3566 	/* LDV_COMMENT_END_PREP */
 3567 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_pollstall_timer_function" */
 3568 	unsigned long  var_udc_pollstall_timer_function_39_p0;
 3569 
 3570 
 3571 
 3572 
 3573 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 3574 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 3575 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 3576 	LDV_IN_INTERRUPT=1;
 3577 
 3578 
 3579 
 3580 
 3581 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 3582 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 3583 	/*============================= FUNCTION CALL SECTION       =============================*/
 3584 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 3585 	ldv_initialize();
 3586 	
 3587 
 3588 	
 3589 
 3590 	int ldv_s_udc_pci_driver_pci_driver = 0;
 3591 
 3592 	
 3593 
 3594 	
 3595 
 3596 
 3597 	while(  nondet_int()
 3598 		|| !(ldv_s_udc_pci_driver_pci_driver == 0)
 3599 	) {
 3600 
 3601 		switch(nondet_int()) {
 3602 
 3603 			case 0: {
 3604 
 3605 				/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3606 				
 3607 
 3608 				/* content: static int udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)*/
 3609 				/* LDV_COMMENT_BEGIN_PREP */
 3610 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3611 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3612 				/* LDV_COMMENT_END_PREP */
 3613 				/* LDV_COMMENT_FUNCTION_CALL Function from field "enable" from driver structure with callbacks "udc_ep_ops" */
 3614 				ldv_handler_precall();
 3615 				udc_ep_enable( var_group1, var_udc_ep_enable_6_p1);
 3616 				/* LDV_COMMENT_BEGIN_PREP */
 3617 				#ifdef UDC_VERBOSE
 3618 				#endif
 3619 				/* LDV_COMMENT_END_PREP */
 3620 				
 3621 
 3622 				
 3623 
 3624 			}
 3625 
 3626 			break;
 3627 			case 1: {
 3628 
 3629 				/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3630 				
 3631 
 3632 				/* content: static int udc_ep_disable(struct usb_ep *usbep)*/
 3633 				/* LDV_COMMENT_BEGIN_PREP */
 3634 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3635 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3636 				/* LDV_COMMENT_END_PREP */
 3637 				/* LDV_COMMENT_FUNCTION_CALL Function from field "disable" from driver structure with callbacks "udc_ep_ops" */
 3638 				ldv_handler_precall();
 3639 				udc_ep_disable( var_group1);
 3640 				/* LDV_COMMENT_BEGIN_PREP */
 3641 				#ifdef UDC_VERBOSE
 3642 				#endif
 3643 				/* LDV_COMMENT_END_PREP */
 3644 				
 3645 
 3646 				
 3647 
 3648 			}
 3649 
 3650 			break;
 3651 			case 2: {
 3652 
 3653 				/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3654 				
 3655 
 3656 				/* content: static struct usb_request * udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)*/
 3657 				/* LDV_COMMENT_BEGIN_PREP */
 3658 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3659 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3660 				/* LDV_COMMENT_END_PREP */
 3661 				/* LDV_COMMENT_FUNCTION_CALL Function from field "alloc_request" from driver structure with callbacks "udc_ep_ops" */
 3662 				ldv_handler_precall();
 3663 				udc_alloc_request( var_group1, var_udc_alloc_request_9_p1);
 3664 				/* LDV_COMMENT_BEGIN_PREP */
 3665 				#ifdef UDC_VERBOSE
 3666 				#endif
 3667 				/* LDV_COMMENT_END_PREP */
 3668 				
 3669 
 3670 				
 3671 
 3672 			}
 3673 
 3674 			break;
 3675 			case 3: {
 3676 
 3677 				/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3678 				
 3679 
 3680 				/* content: static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)*/
 3681 				/* LDV_COMMENT_BEGIN_PREP */
 3682 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3683 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3684 				/* LDV_COMMENT_END_PREP */
 3685 				/* LDV_COMMENT_FUNCTION_CALL Function from field "free_request" from driver structure with callbacks "udc_ep_ops" */
 3686 				ldv_handler_precall();
 3687 				udc_free_request( var_group1, var_group2);
 3688 				/* LDV_COMMENT_BEGIN_PREP */
 3689 				#ifdef UDC_VERBOSE
 3690 				#endif
 3691 				/* LDV_COMMENT_END_PREP */
 3692 				
 3693 
 3694 				
 3695 
 3696 			}
 3697 
 3698 			break;
 3699 			case 4: {
 3700 
 3701 				/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3702 				
 3703 
 3704 				/* content: static int udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)*/
 3705 				/* LDV_COMMENT_BEGIN_PREP */
 3706 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3707 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3708 				#ifdef UDC_VERBOSE
 3709 				#endif
 3710 				/* LDV_COMMENT_END_PREP */
 3711 				/* LDV_COMMENT_FUNCTION_CALL Function from field "queue" from driver structure with callbacks "udc_ep_ops" */
 3712 				ldv_handler_precall();
 3713 				udc_queue( var_group1, var_group2, var_udc_queue_23_p2);
 3714 				
 3715 
 3716 				
 3717 
 3718 			}
 3719 
 3720 			break;
 3721 			case 5: {
 3722 
 3723 				/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3724 				
 3725 
 3726 				/* content: static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)*/
 3727 				/* LDV_COMMENT_BEGIN_PREP */
 3728 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3729 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3730 				#ifdef UDC_VERBOSE
 3731 				#endif
 3732 				/* LDV_COMMENT_END_PREP */
 3733 				/* LDV_COMMENT_FUNCTION_CALL Function from field "dequeue" from driver structure with callbacks "udc_ep_ops" */
 3734 				ldv_handler_precall();
 3735 				udc_dequeue( var_group1, var_group2);
 3736 				
 3737 
 3738 				
 3739 
 3740 			}
 3741 
 3742 			break;
 3743 			case 6: {
 3744 
 3745 				/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3746 				
 3747 
 3748 				/* content: static int udc_set_halt(struct usb_ep *usbep, int halt)*/
 3749 				/* LDV_COMMENT_BEGIN_PREP */
 3750 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3751 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3752 				#ifdef UDC_VERBOSE
 3753 				#endif
 3754 				/* LDV_COMMENT_END_PREP */
 3755 				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_halt" from driver structure with callbacks "udc_ep_ops" */
 3756 				ldv_handler_precall();
 3757 				udc_set_halt( var_group1, var_udc_set_halt_26_p1);
 3758 				
 3759 
 3760 				
 3761 
 3762 			}
 3763 
 3764 			break;
 3765 			case 7: {
 3766 
 3767 				/** STRUCT: struct type: usb_gadget_ops, struct name: udc_ops **/
 3768 				
 3769 
 3770 				/* content: static int udc_wakeup(struct usb_gadget *gadget)*/
 3771 				/* LDV_COMMENT_BEGIN_PREP */
 3772 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3773 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3774 				#ifdef UDC_VERBOSE
 3775 				#endif
 3776 				/* LDV_COMMENT_END_PREP */
 3777 				/* LDV_COMMENT_FUNCTION_CALL Function from field "wakeup" from driver structure with callbacks "udc_ops" */
 3778 				ldv_handler_precall();
 3779 				udc_wakeup( var_group3);
 3780 				
 3781 
 3782 				
 3783 
 3784 			}
 3785 
 3786 			break;
 3787 			case 8: {
 3788 
 3789 				/** STRUCT: struct type: usb_gadget_ops, struct name: udc_ops **/
 3790 				
 3791 
 3792 				/* content: static int udc_get_frame(struct usb_gadget *gadget)*/
 3793 				/* LDV_COMMENT_BEGIN_PREP */
 3794 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3795 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3796 				#ifdef UDC_VERBOSE
 3797 				#endif
 3798 				/* LDV_COMMENT_END_PREP */
 3799 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_frame" from driver structure with callbacks "udc_ops" */
 3800 				ldv_handler_precall();
 3801 				udc_get_frame( var_group3);
 3802 				
 3803 
 3804 				
 3805 
 3806 			}
 3807 
 3808 			break;
 3809 			case 9: {
 3810 
 3811 				/** STRUCT: struct type: usb_gadget_ops, struct name: udc_ops **/
 3812 				
 3813 
 3814 				/* content: static int amd5536_udc_start(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
 3815 				/* LDV_COMMENT_BEGIN_PREP */
 3816 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3817 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3818 				#ifdef UDC_VERBOSE
 3819 				#endif
 3820 				/* LDV_COMMENT_END_PREP */
 3821 				/* LDV_COMMENT_FUNCTION_CALL Function from field "udc_start" from driver structure with callbacks "udc_ops" */
 3822 				ldv_handler_precall();
 3823 				amd5536_udc_start( var_group3, var_group4);
 3824 				
 3825 
 3826 				
 3827 
 3828 			}
 3829 
 3830 			break;
 3831 			case 10: {
 3832 
 3833 				/** STRUCT: struct type: usb_gadget_ops, struct name: udc_ops **/
 3834 				
 3835 
 3836 				/* content: static int amd5536_udc_stop(struct usb_gadget *g)*/
 3837 				/* LDV_COMMENT_BEGIN_PREP */
 3838 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3839 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3840 				#ifdef UDC_VERBOSE
 3841 				#endif
 3842 				/* LDV_COMMENT_END_PREP */
 3843 				/* LDV_COMMENT_FUNCTION_CALL Function from field "udc_stop" from driver structure with callbacks "udc_ops" */
 3844 				ldv_handler_precall();
 3845 				amd5536_udc_stop( var_group3);
 3846 				
 3847 
 3848 				
 3849 
 3850 			}
 3851 
 3852 			break;
 3853 			case 11: {
 3854 
 3855 				/** STRUCT: struct type: pci_driver, struct name: udc_pci_driver **/
 3856 				if(ldv_s_udc_pci_driver_pci_driver==0) {
 3857 
 3858 				/* content: static int udc_pci_probe( struct pci_dev *pdev, const struct pci_device_id *id )*/
 3859 				/* LDV_COMMENT_BEGIN_PREP */
 3860 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3861 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3862 				#ifdef UDC_VERBOSE
 3863 				#endif
 3864 				/* LDV_COMMENT_END_PREP */
 3866 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "udc_pci_driver". Standard function test for correct return result. */
 3866 				res_udc_pci_probe_54 = udc_pci_probe( var_group5, var_udc_pci_probe_54_p1);
 3867 				 ldv_check_return_value(res_udc_pci_probe_54);
 3868 				 ldv_check_return_value_probe(res_udc_pci_probe_54);
 3869 				 if(res_udc_pci_probe_54) 
 3870 					goto ldv_module_exit;
 3871 				ldv_s_udc_pci_driver_pci_driver++;
 3872 
 3873 				}
 3874 
 3875 			}
 3876 
 3877 			break;
 3878 			case 12: {
 3879 
 3880 				/** STRUCT: struct type: pci_driver, struct name: udc_pci_driver **/
 3881 				if(ldv_s_udc_pci_driver_pci_driver==1) {
 3882 
 3883 				/* content: static void udc_pci_remove(struct pci_dev *pdev)*/
 3884 				/* LDV_COMMENT_BEGIN_PREP */
 3885 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3886 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3887 				#ifdef UDC_VERBOSE
 3888 				#endif
 3889 				/* LDV_COMMENT_END_PREP */
 3890 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "udc_pci_driver" */
 3891 				ldv_handler_precall();
 3892 				udc_pci_remove( var_group5);
 3893 				ldv_s_udc_pci_driver_pci_driver=0;
 3894 
 3895 				}
 3896 
 3897 			}
 3898 
 3899 			break;
 3900 			case 13: {
 3901 
 3902 				/** CALLBACK SECTION request_irq **/
 3903 				LDV_IN_INTERRUPT=2;
 3904 
 3905 				/* content: static irqreturn_t udc_irq(int irq, void *pdev)*/
 3906 				/* LDV_COMMENT_BEGIN_PREP */
 3907 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3908 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3909 				#ifdef UDC_VERBOSE
 3910 				#endif
 3911 				/* LDV_COMMENT_END_PREP */
 3912 				/* LDV_COMMENT_FUNCTION_CALL */
 3913 				ldv_handler_precall();
 3914 				udc_irq( var_udc_irq_49_p0, var_udc_irq_49_p1);
 3915 				LDV_IN_INTERRUPT=1;
 3916 
 3917 				
 3918 
 3919 			}
 3920 
 3921 			break;
 3922 			case 14: {
 3923 
 3924 				/** TIMER SECTION timer **/
 3925 				
 3926 
 3927 				/* content: static void udc_timer_function(unsigned long v)*/
 3928 				/* LDV_COMMENT_BEGIN_PREP */
 3929 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3930 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3931 				#ifdef UDC_VERBOSE
 3932 				#endif
 3933 				/* LDV_COMMENT_END_PREP */
 3934 				/* LDV_COMMENT_FUNCTION_CALL */
 3935 				ldv_handler_precall();
 3936 				udc_timer_function( var_udc_timer_function_37_p0);
 3937 				
 3938 
 3939 				
 3940 
 3941 			}
 3942 
 3943 			break;
 3944 			case 15: {
 3945 
 3946 				/** TIMER SECTION timer **/
 3947 				
 3948 
 3949 				/* content: static void udc_pollstall_timer_function(unsigned long v)*/
 3950 				/* LDV_COMMENT_BEGIN_PREP */
 3951 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3952 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3953 				#ifdef UDC_VERBOSE
 3954 				#endif
 3955 				/* LDV_COMMENT_END_PREP */
 3956 				/* LDV_COMMENT_FUNCTION_CALL */
 3957 				ldv_handler_precall();
 3958 				udc_pollstall_timer_function( var_udc_pollstall_timer_function_39_p0);
 3959 				
 3960 
 3961 				
 3962 
 3963 			}
 3964 
 3965 			break;
 3966 			default: break;
 3967 
 3968 		}
 3969 
 3970 	}
 3971 
 3972 	ldv_module_exit: 
 3973 
 3974 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
 3975 	ldv_final: ldv_check_final_state();
 3976 
 3977 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 3978 	return;
 3979 
 3980 }
 3981 #endif
 3982 
 3983 	/* LDV_COMMENT_END_MAIN */
    1 
    2 #include <asm/io.h>
    3 #include <verifier/rcv.h>
    4 #include <verifier/set.h>
    5 
    6 
    7 Set LDV_IO_MEMS = 0;
    8 
    9 
   10 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_io_mem_remap') Create some io-memory map for specified address */
   11 void *ldv_io_mem_remap(void *addr) {
   12     ldv_assert(ldv_set_not_contains(LDV_IO_MEMS, addr));
   13 
   14     void *ptr = ldv_undef_ptr();
   15     if (ptr != NULL) {
   16         ldv_set_add(LDV_IO_MEMS, addr);
   17         return ptr;
   18     }
   19     return NULL;
   20 }
   21 
   22 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_io_mem_unmap') Delete some io-memory map for specified address */
   23 void ldv_io_mem_unmap(const volatile void *addr) {
   24     ldv_assert(ldv_set_contains(LDV_IO_MEMS, addr));
   25     ldv_set_remove(LDV_IO_MEMS, addr);
   26 }
   27 
   28 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all io-memory map are unmapped properly */
   29 void ldv_check_final_state(void) {
   30     ldv_assert(ldv_set_is_empty(LDV_IO_MEMS));
   31 }
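/*
 * Editorial sketch (not part of the generated model): a fragment that
 * satisfies the io-memory model above. Every address passed to
 * ldv_io_mem_remap() must reach ldv_io_mem_unmap() before
 * ldv_check_final_state() runs; "example_dev_init" and "bar0" are
 * hypothetical names used only for illustration.
 */
static int example_dev_init(void *bar0) {
    void *regs = ldv_io_mem_remap(bar0);   /* adds bar0 to LDV_IO_MEMS */
    if (!regs)
        return -1;                         /* mapping may fail nondeterministically */
    /* ... access device registers through "regs" ... */
    ldv_io_mem_unmap(bar0);                /* removes bar0 from the set again */
    return 0;                              /* the final-state assertion now holds */
}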
   32 #line 1 "/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.1-rc1.tar.xz--X--152_1a--X--cpachecker/linux-4.1-rc1.tar.xz/csd_deg_dscv/8673/dscv_tempdir/dscv/ri/152_1a/drivers/usb/gadget/udc/amd5536udc.c"
   33 
   34 /*
   35  * amd5536udc.c -- AMD 5536 UDC high/full speed USB device controller
   36  *
   37  * Copyright (C) 2005-2007 AMD (http://www.amd.com)
   38  * Author: Thomas Dahlmann
   39  *
   40  * This program is free software; you can redistribute it and/or modify
   41  * it under the terms of the GNU General Public License as published by
   42  * the Free Software Foundation; either version 2 of the License, or
   43  * (at your option) any later version.
   44  */
   45 
   46 /*
   47  * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
   48  * It is a high-speed, DMA-capable USB device controller. Besides ep0 it
   49  * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
   50  *
   51  * Make sure that UDC is assigned to port 4 by BIOS settings (port can also
   52  * be used as host port) and UOC bits PAD_EN and APU are set (should be done
   53  * by BIOS init).
   54  *
   55  * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
   56  * work without updating NET_IP_ALIGN. Alternatively, PIO mode (module
   57  * param "use_dma=0") can be used with gadget ether.
   58  */
   59 
   60 /* debug control */
   61 /* #define UDC_VERBOSE */
   62 
   63 /* Driver strings */
   64 #define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
   65 #define UDC_DRIVER_VERSION_STRING	"01.00.0206"
   66 
   67 /* system */
   68 #include <linux/module.h>
   69 #include <linux/pci.h>
   70 #include <linux/kernel.h>
   71 #include <linux/delay.h>
   72 #include <linux/ioport.h>
   73 #include <linux/sched.h>
   74 #include <linux/slab.h>
   75 #include <linux/errno.h>
   76 #include <linux/timer.h>
   77 #include <linux/list.h>
   78 #include <linux/interrupt.h>
   79 #include <linux/ioctl.h>
   80 #include <linux/fs.h>
   81 #include <linux/dmapool.h>
   82 #include <linux/moduleparam.h>
   83 #include <linux/device.h>
   84 #include <linux/io.h>
   85 #include <linux/irq.h>
   86 #include <linux/prefetch.h>
   87 
   88 #include <asm/byteorder.h>
   89 #include <asm/unaligned.h>
   90 
   91 /* gadget stack */
   92 #include <linux/usb/ch9.h>
   93 #include <linux/usb/gadget.h>
   94 
   95 /* udc specific */
   96 #include "amd5536udc.h"
   97 
   98 
   99 static void udc_tasklet_disconnect(unsigned long);
  100 static void empty_req_queue(struct udc_ep *);
  101 static int udc_probe(struct udc *dev);
  102 static void udc_basic_init(struct udc *dev);
  103 static void udc_setup_endpoints(struct udc *dev);
  104 static void udc_soft_reset(struct udc *dev);
  105 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
  106 static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
  107 static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
  108 static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
  109 				unsigned long buf_len, gfp_t gfp_flags);
  110 static int udc_remote_wakeup(struct udc *dev);
  111 static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
  112 static void udc_pci_remove(struct pci_dev *pdev);
  113 
  114 /* description */
  115 static const char mod_desc[] = UDC_MOD_DESCRIPTION;
  116 static const char name[] = "amd5536udc";
  117 
  118 /* structure to hold endpoint function pointers */
  119 static const struct usb_ep_ops udc_ep_ops;
  120 
  121 /* received setup data */
  122 static union udc_setup_data setup_data;
  123 
  124 /* pointer to device object */
  125 static struct udc *udc;
  126 
  127 /* irq spin lock for soft reset */
  128 static DEFINE_SPINLOCK(udc_irq_spinlock);
  129 /* stall spin lock */
  130 static DEFINE_SPINLOCK(udc_stall_spinlock);
  131 
  132 /*
  133  * slave mode: pending bytes in rx fifo after nyet,
  134  * used if EPIN irq came but no req was available
  135  */
  136 static unsigned int udc_rxfifo_pending;
  137 
  138 /* count soft resets after suspend to avoid loop */
  139 static int soft_reset_occured;
  140 static int soft_reset_after_usbreset_occured;
  141 
  142 /* timer */
  143 static struct timer_list udc_timer;
  144 static int stop_timer;
  145 
  146 /* set_rde -- used to control enabling of RX DMA. The problem is
  147  * that the UDC has only one bit (RDE) to enable/disable RX DMA for
  148  * all OUT endpoints. So we have to handle race conditions like
  149  * when OUT data reaches the fifo but no request was queued yet.
  150  * This cannot be solved by leaving RX DMA disabled until a
  151  * request gets queued, because there may be other OUT packets
  152  * in the FIFO (important for not blocking control traffic).
  153  * The value of set_rde controls the corresponding timer.
  154  *
  155  * set_rde -1 == not used, means it is allowed to be set to 0 or 1
  156  * set_rde  0 == do not touch RDE, do not start the RDE timer
  157  * set_rde  1 == timer function will look whether FIFO has data
  158  * set_rde  2 == set by timer function to enable RX DMA on next call
  159  */
  160 static int set_rde = -1;
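/*
 * Editorial sketch (not part of the driver): a minimal timer callback that
 * walks the set_rde states described above. The driver's real
 * udc_timer_function() appears later in this file; this is illustration only.
 */
static void example_rde_timer(unsigned long data)
{
	if (set_rde == 1) {
		/* state 1: the fifo is checked for data; if none arrived,
		 * advance to state 2 so RX DMA may be enabled on next call */
		set_rde = 2;
	} else if (set_rde == 2) {
		/* state 2: set the RDE bit now and return to "not used" */
		set_rde = -1;
	}
}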
  161 
  162 static DECLARE_COMPLETION(on_exit);
  163 static struct timer_list udc_pollstall_timer;
  164 static int stop_pollstall_timer;
  165 static DECLARE_COMPLETION(on_pollstall_exit);
  166 
  167 /* tasklet for usb disconnect */
  168 static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
  169 		(unsigned long) &udc);
  170 
  171 
  172 /* endpoint names used for print */
  173 static const char ep0_string[] = "ep0in";
  174 static const char *const ep_string[] = {
  175 	ep0_string,
  176 	"ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
  177 	"ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
  178 	"ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
  179 	"ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
  180 	"ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
  181 	"ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
  182 	"ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
  183 };
  184 
  185 /* DMA usage flag */
  186 static bool use_dma = 1;
  187 /* packet per buffer dma */
  188 static bool use_dma_ppb = 1;
  189 /* with per descr. update */
  190 static bool use_dma_ppb_du;
  191 /* buffer fill mode */
  192 static int use_dma_bufferfill_mode;
  193 /* full speed only mode */
  194 static bool use_fullspeed;
  195 /* tx buffer size for high speed */
  196 static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
  197 
  198 /* module parameters */
  199 module_param(use_dma, bool, S_IRUGO);
  200 MODULE_PARM_DESC(use_dma, "true for DMA");
  201 module_param(use_dma_ppb, bool, S_IRUGO);
  202 MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
  203 module_param(use_dma_ppb_du, bool, S_IRUGO);
  204 MODULE_PARM_DESC(use_dma_ppb_du,
  205 	"true for DMA in packet per buffer mode with descriptor update");
  206 module_param(use_fullspeed, bool, S_IRUGO);
  207 MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
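/*
 * Example usage (editorial): the DMA behaviour is chosen at module load
 * time, e.g. "modprobe amd5536udc use_dma=0" selects the PIO mode mentioned
 * above, and "modprobe amd5536udc use_dma_ppb_du=1" selects packet-per-buffer
 * DMA with descriptor update. All four parameters are read-only in sysfs
 * (S_IRUGO).
 */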
  208 
  209 /*---------------------------------------------------------------------------*/
  210 /* Prints UDC device registers and endpoint irq registers */
  211 static void print_regs(struct udc *dev)
  212 {
  213 	DBG(dev, "------- Device registers -------\n");
  214 	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
  215 	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
  216 	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
  217 	DBG(dev, "\n");
  218 	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
  219 	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
  220 	DBG(dev, "\n");
  221 	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
  222 	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
  223 	DBG(dev, "\n");
  224 	DBG(dev, "USE DMA        = %d\n", use_dma);
  225 	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
  226 		DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
  227 			"WITHOUT desc. update)\n");
  228 		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
  229 	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
  230 		DBG(dev, "DMA mode       = PPBDU (packet per buffer "
  231 			"WITH desc. update)\n");
  232 		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
  233 	}
  234 	if (use_dma && use_dma_bufferfill_mode) {
  235 		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
  236 		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
  237 	}
  238 	if (!use_dma)
  239 		dev_info(&dev->pdev->dev, "FIFO mode\n");
  240 	DBG(dev, "-------------------------------------------------------\n");
  241 }
  242 
  243 /* Masks unused interrupts */
  244 static int udc_mask_unused_interrupts(struct udc *dev)
  245 {
  246 	u32 tmp;
  247 
  248 	/* mask all dev interrupts */
  249 	tmp =	AMD_BIT(UDC_DEVINT_SVC) |
  250 		AMD_BIT(UDC_DEVINT_ENUM) |
  251 		AMD_BIT(UDC_DEVINT_US) |
  252 		AMD_BIT(UDC_DEVINT_UR) |
  253 		AMD_BIT(UDC_DEVINT_ES) |
  254 		AMD_BIT(UDC_DEVINT_SI) |
  255 		AMD_BIT(UDC_DEVINT_SOF)|
  256 		AMD_BIT(UDC_DEVINT_SC);
  257 	writel(tmp, &dev->regs->irqmsk);
  258 
  259 	/* mask all ep interrupts */
  260 	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);
  261 
  262 	return 0;
  263 }
  264 
  265 /* Enables endpoint 0 interrupts */
  266 static int udc_enable_ep0_interrupts(struct udc *dev)
  267 {
  268 	u32 tmp;
  269 
  270 	DBG(dev, "udc_enable_ep0_interrupts()\n");
  271 
  272 	/* read irq mask */
  273 	tmp = readl(&dev->regs->ep_irqmsk);
  274 	/* enable ep0 irq's */
  275 	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
  276 		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
  277 	writel(tmp, &dev->regs->ep_irqmsk);
  278 
  279 	return 0;
  280 }
  281 
  282 /* Enables device interrupts for SET_INTF and SET_CONFIG */
  283 static int udc_enable_dev_setup_interrupts(struct udc *dev)
  284 {
  285 	u32 tmp;
  286 
  287 	DBG(dev, "enable device interrupts for setup data\n");
  288 
  289 	/* read irq mask */
  290 	tmp = readl(&dev->regs->irqmsk);
  291 
  292 	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
  293 	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
  294 		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
  295 		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
  296 		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
  297 		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
  298 	writel(tmp, &dev->regs->irqmsk);
  299 
  300 	return 0;
  301 }
  302 
  303 /* Calculates fifo start of endpoint based on preceding endpoints */
  304 static int udc_set_txfifo_addr(struct udc_ep *ep)
  305 {
  306 	struct udc	*dev;
  307 	u32 tmp;
  308 	int i;
  309 
  310 	if (!ep || !(ep->in))
  311 		return -EINVAL;
  312 
  313 	dev = ep->dev;
  314 	ep->txfifo = dev->txfifo;
  315 
  316 	/* traverse ep's */
  317 	for (i = 0; i < ep->num; i++) {
  318 		if (dev->ep[i].regs) {
  319 			/* read fifo size */
  320 			tmp = readl(&dev->ep[i].regs->bufin_framenum);
  321 			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
  322 			ep->txfifo += tmp;
  323 		}
  324 	}
  325 	return 0;
  326 }
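/*
 * Worked example (editorial, illustrative values): with double buffering
 * (fifo size = 2 x max packet, see udc_ep_enable() below) and 512-byte max
 * packets, each enabled IN endpoint occupies 2 * 512 / 4 = 256 fifo dwords,
 * so with ep1in and ep2in enabled, ep3in's tx fifo starts 512 dwords past
 * dev->txfifo.
 */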
  327 
  328 /* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
  329 static u32 cnak_pending;
  330 
  331 static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
  332 {
  333 	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
  334 		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
  335 		cnak_pending |= 1 << (num);
  336 		ep->naking = 1;
  337 	} else
  338 		cnak_pending = cnak_pending & (~(1 << (num)));
  339 }
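/*
 * Editorial illustration: with the bit layout above, a CNAK that cannot be
 * cleared on ep0out leaves cnak_pending == 0x00010000 (bit16) and
 * ep->naking == 1, so the clear can be retried later.
 */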
  340 
  341 
  342 /* Enables endpoint, is called by gadget driver */
  343 static int
  344 udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
  345 {
  346 	struct udc_ep		*ep;
  347 	struct udc		*dev;
  348 	u32			tmp;
  349 	unsigned long		iflags;
  350 	u8 udc_csr_epix;
  351 	unsigned		maxpacket;
  352 
  353 	if (!usbep
  354 			|| usbep->name == ep0_string
  355 			|| !desc
  356 			|| desc->bDescriptorType != USB_DT_ENDPOINT)
  357 		return -EINVAL;
  358 
  359 	ep = container_of(usbep, struct udc_ep, ep);
  360 	dev = ep->dev;
  361 
  362 	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);
  363 
  364 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
  365 		return -ESHUTDOWN;
  366 
  367 	spin_lock_irqsave(&dev->lock, iflags);
  368 	ep->ep.desc = desc;
  369 
  370 	ep->halted = 0;
  371 
  372 	/* set traffic type */
  373 	tmp = readl(&dev->ep[ep->num].regs->ctl);
  374 	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
  375 	writel(tmp, &dev->ep[ep->num].regs->ctl);
  376 
  377 	/* set max packet size */
  378 	maxpacket = usb_endpoint_maxp(desc);
  379 	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
  380 	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
  381 	ep->ep.maxpacket = maxpacket;
  382 	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);
  383 
  384 	/* IN ep */
  385 	if (ep->in) {
  386 
  387 		/* ep ix in UDC CSR register space */
  388 		udc_csr_epix = ep->num;
  389 
  390 		/* set buffer size (tx fifo entries) */
  391 		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
  392 		/* double buffering: fifo size = 2 x max packet size */
  393 		tmp = AMD_ADDBITS(
  394 				tmp,
  395 				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
  396 					  / UDC_DWORD_BYTES,
  397 				UDC_EPIN_BUFF_SIZE);
  398 		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);
  399 
  400 		/* calc. tx fifo base addr */
  401 		udc_set_txfifo_addr(ep);
  402 
  403 		/* flush fifo */
  404 		tmp = readl(&ep->regs->ctl);
  405 		tmp |= AMD_BIT(UDC_EPCTL_F);
  406 		writel(tmp, &ep->regs->ctl);
  407 
  408 	/* OUT ep */
  409 	} else {
  410 		/* ep ix in UDC CSR register space */
  411 		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
  412 
  413 		/* set max packet size UDC CSR	*/
  414 		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
  415 		tmp = AMD_ADDBITS(tmp, maxpacket,
  416 					UDC_CSR_NE_MAX_PKT);
  417 		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
  418 
  419 		if (use_dma && !ep->in) {
  420 			/* alloc and init BNA dummy request */
  421 			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
  422 			ep->bna_occurred = 0;
  423 		}
  424 
  425 		if (ep->num != UDC_EP0OUT_IX)
  426 			dev->data_ep_enabled = 1;
  427 	}
  428 
  429 	/* set ep values */
  430 	tmp = readl(&dev->csr->ne[udc_csr_epix]);
  431 	/* max packet */
  432 	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
  433 	/* ep number */
  434 	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
  435 	/* ep direction */
  436 	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
  437 	/* ep type */
  438 	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
  439 	/* ep config */
  440 	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
  441 	/* ep interface */
  442 	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
  443 	/* ep alt */
  444 	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
  445 	/* write reg */
  446 	writel(tmp, &dev->csr->ne[udc_csr_epix]);
  447 
  448 	/* enable ep irq */
  449 	tmp = readl(&dev->regs->ep_irqmsk);
  450 	tmp &= AMD_UNMASK_BIT(ep->num);
  451 	writel(tmp, &dev->regs->ep_irqmsk);
  452 
  453 	/*
  454 	 * clear NAK by writing CNAK
  455 	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
  456 	 */
  457 	if (!use_dma || ep->in) {
  458 		tmp = readl(&ep->regs->ctl);
  459 		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
  460 		writel(tmp, &ep->regs->ctl);
  461 		ep->naking = 0;
  462 		UDC_QUEUE_CNAK(ep, ep->num);
  463 	}
  464 	tmp = desc->bEndpointAddress;
  465 	DBG(dev, "%s enabled\n", usbep->name);
  466 
  467 	spin_unlock_irqrestore(&dev->lock, iflags);
  468 	return 0;
  469 }
  470 
  471 /* Resets endpoint */
  472 static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
  473 {
  474 	u32		tmp;
  475 
  476 	VDBG(ep->dev, "ep-%d reset\n", ep->num);
  477 	ep->ep.desc = NULL;
  478 	ep->ep.ops = &udc_ep_ops;
  479 	INIT_LIST_HEAD(&ep->queue);
  480 
  481 	usb_ep_set_maxpacket_limit(&ep->ep, (u16) ~0);
  482 	/* set NAK */
  483 	tmp = readl(&ep->regs->ctl);
  484 	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
  485 	writel(tmp, &ep->regs->ctl);
  486 	ep->naking = 1;
  487 
  488 	/* disable interrupt */
  489 	tmp = readl(&regs->ep_irqmsk);
  490 	tmp |= AMD_BIT(ep->num);
  491 	writel(tmp, &regs->ep_irqmsk);
  492 
  493 	if (ep->in) {
  494 		/* unset P and IN bit of potential former DMA */
  495 		tmp = readl(&ep->regs->ctl);
  496 		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
  497 		writel(tmp, &ep->regs->ctl);
  498 
  499 		tmp = readl(&ep->regs->sts);
  500 		tmp |= AMD_BIT(UDC_EPSTS_IN);
  501 		writel(tmp, &ep->regs->sts);
  502 
  503 		/* flush the fifo */
  504 		tmp = readl(&ep->regs->ctl);
  505 		tmp |= AMD_BIT(UDC_EPCTL_F);
  506 		writel(tmp, &ep->regs->ctl);
  507 
  508 	}
  509 	/* reset desc pointer */
  510 	writel(0, &ep->regs->desptr);
  511 }
  512 
  513 /* Disables endpoint, is called by gadget driver */
  514 static int udc_ep_disable(struct usb_ep *usbep)
  515 {
  516 	struct udc_ep	*ep = NULL;
  517 	unsigned long	iflags;
  518 
  519 	if (!usbep)
  520 		return -EINVAL;
  521 
  522 	ep = container_of(usbep, struct udc_ep, ep);
  523 	if (usbep->name == ep0_string || !ep->ep.desc)
  524 		return -EINVAL;
  525 
  526 	DBG(ep->dev, "Disable ep-%d\n", ep->num);
  527 
  528 	spin_lock_irqsave(&ep->dev->lock, iflags);
  529 	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
  530 	empty_req_queue(ep);
  531 	ep_init(ep->dev->regs, ep);
  532 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
  533 
  534 	return 0;
  535 }
  536 
  537 /* Allocates request packet, called by gadget driver */
  538 static struct usb_request *
  539 udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
  540 {
  541 	struct udc_request	*req;
  542 	struct udc_data_dma	*dma_desc;
  543 	struct udc_ep	*ep;
  544 
  545 	if (!usbep)
  546 		return NULL;
  547 
  548 	ep = container_of(usbep, struct udc_ep, ep);
  549 
  550 	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
  551 	req = kzalloc(sizeof(struct udc_request), gfp);
  552 	if (!req)
  553 		return NULL;
  554 
  555 	req->req.dma = DMA_DONT_USE;
  556 	INIT_LIST_HEAD(&req->queue);
  557 
  558 	if (ep->dma) {
  559 		/* ep0 in requests are allocated from data pool here */
  560 		dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
  561 						&req->td_phys);
  562 		if (!dma_desc) {
  563 			kfree(req);
  564 			return NULL;
  565 		}
  566 
  567 		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
  568 				"td_phys = %lx\n",
  569 				req, dma_desc,
  570 				(unsigned long)req->td_phys);
  571 		/* prevent the desc. from being used - set HOST BUSY */
  572 		dma_desc->status = AMD_ADDBITS(dma_desc->status,
  573 						UDC_DMA_STP_STS_BS_HOST_BUSY,
  574 						UDC_DMA_STP_STS_BS);
  575 		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
  576 		req->td_data = dma_desc;
  577 		req->td_data_last = NULL;
  578 		req->chain_len = 1;
  579 	}
  580 
  581 	return &req->req;
  582 }
  583 
  584 /* Frees request packet, called by gadget driver */
  585 static void
  586 udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
  587 {
  588 	struct udc_ep	*ep;
  589 	struct udc_request	*req;
  590 
  591 	if (!usbep || !usbreq)
  592 		return;
  593 
  594 	ep = container_of(usbep, struct udc_ep, ep);
  595 	req = container_of(usbreq, struct udc_request, req);
  596 	VDBG(ep->dev, "free_req req=%p\n", req);
  597 	BUG_ON(!list_empty(&req->queue));
  598 	if (req->td_data) {
  599 		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
  600 
  601 		/* free dma chain if created */
  602 		if (req->chain_len > 1)
  603 			udc_free_dma_chain(ep->dev, req);
  604 
  605 		pci_pool_free(ep->dev->data_requests, req->td_data,
  606 							req->td_phys);
  607 	}
  608 	kfree(req);
  609 }
  610 
  611 /* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
  612 static void udc_init_bna_dummy(struct udc_request *req)
  613 {
  614 	if (req) {
  615 		/* set last bit */
  616 		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
  617 		/* set next pointer to itself */
  618 		req->td_data->next = req->td_phys;
  619 		/* set HOST BUSY */
  620 		req->td_data->status
  621 			= AMD_ADDBITS(req->td_data->status,
  622 					UDC_DMA_STP_STS_BS_DMA_DONE,
  623 					UDC_DMA_STP_STS_BS);
  624 #ifdef UDC_VERBOSE
  625 		pr_debug("bna desc = %p, sts = %08x\n",
  626 			req->td_data, req->td_data->status);
  627 #endif
  628 	}
  629 }
  630 
  631 /* Allocate BNA dummy descriptor */
  632 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
  633 {
  634 	struct udc_request *req = NULL;
  635 	struct usb_request *_req = NULL;
  636 
  637 	/* alloc the dummy request */
  638 	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
  639 	if (_req) {
  640 		req = container_of(_req, struct udc_request, req);
  641 		ep->bna_dummy_req = req;
  642 		udc_init_bna_dummy(req);
  643 	}
  644 	return req;
  645 }
  646 
  647 /* Write data to TX fifo for IN packets */
  648 static void
  649 udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
  650 {
  651 	u8			*req_buf;
  652 	u32			*buf;
  653 	int			i, j;
  654 	unsigned		bytes = 0;
  655 	unsigned		remaining = 0;
  656 
  657 	if (!req || !ep)
  658 		return;
  659 
  660 	req_buf = req->buf + req->actual;
  661 	prefetch(req_buf);
  662 	remaining = req->length - req->actual;
  663 
  664 	buf = (u32 *) req_buf;
  665 
  666 	bytes = ep->ep.maxpacket;
  667 	if (bytes > remaining)
  668 		bytes = remaining;
  669 
  670 	/* dwords first */
  671 	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
  672 		writel(*(buf + i), ep->txfifo);
  673 
  674 	/* remaining bytes must be written by byte access */
  675 	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
  676 		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
  677 							ep->txfifo);
  678 	}
  679 
  680 	/* dummy write confirm */
  681 	writel(0, &ep->regs->confirm);
  682 }
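/*
 * Worked example (editorial): writing 13 bytes issues 13 / 4 = 3 dword
 * writes followed by 13 % 4 = 1 single-byte write, after which the dummy
 * confirm write marks the packet as complete.
 */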
  683 
  684 /* Read dwords from RX fifo for OUT transfers */
  685 static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
  686 {
  687 	int i;
  688 
  689 	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);
  690 
  691 	for (i = 0; i < dwords; i++)
  692 		*(buf + i) = readl(dev->rxfifo);
  693 	return 0;
  694 }
  695 
  696 /* Read bytes from RX fifo for OUT transfers */
  697 static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
  698 {
  699 	int i, j;
  700 	u32 tmp;
  701 
  702 	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);
  703 
  704 	/* dwords first */
  705 	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
  706 		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
  707 
  708 	/* remaining bytes must be read by byte access */
  709 	if (bytes % UDC_DWORD_BYTES) {
  710 		tmp = readl(dev->rxfifo);
  711 		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
  712 			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
  713 			tmp = tmp >> UDC_BITS_PER_BYTE;
  714 		}
  715 	}
  716 
  717 	return 0;
  718 }
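/*
 * Worked example (editorial): reading 10 bytes performs two full dword
 * reads (8 bytes) and then one more fifo read from which the remaining two
 * bytes are peeled off least-significant byte first.
 */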
  719 
  720 /* Read data from RX fifo for OUT transfers */
  721 static int
  722 udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
  723 {
  724 	u8 *buf;
  725 	unsigned buf_space;
  726 	unsigned bytes = 0;
  727 	unsigned finished = 0;
  728 
  729 	/* number of received bytes */
  730 	bytes = readl(&ep->regs->sts);
  731 	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);
  732 
  733 	buf_space = req->req.length - req->req.actual;
  734 	buf = req->req.buf + req->req.actual;
  735 	if (bytes > buf_space) {
  736 		if ((buf_space % ep->ep.maxpacket) != 0) {
  737 			DBG(ep->dev,
  738 				"%s: rx %d bytes, rx-buf space = %d bytes\n",
  739 				ep->ep.name, bytes, buf_space);
  740 			req->req.status = -EOVERFLOW;
  741 		}
  742 		bytes = buf_space;
  743 	}
  744 	req->req.actual += bytes;
  745 
  746 	/* last packet ? */
  747 	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
  748 		|| ((req->req.actual == req->req.length) && !req->req.zero))
  749 		finished = 1;
  750 
  751 	/* read rx fifo bytes */
  752 	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
  753 	udc_rxfifo_read_bytes(ep->dev, buf, bytes);
  754 
  755 	return finished;
  756 }
  757 
  758 /* create/re-init a DMA descriptor or a DMA descriptor chain */
  759 static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
  760 {
  761 	int	retval = 0;
  762 	u32	tmp;
  763 
  764 	VDBG(ep->dev, "prep_dma\n");
  765 	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
  766 			ep->num, req->td_data);
  767 
  768 	/* set buffer pointer */
  769 	req->td_data->bufptr = req->req.dma;
  770 
  771 	/* set last bit */
  772 	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
  773 
  774 	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
  775 	if (use_dma_ppb) {
  776 
  777 		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
  778 		if (retval != 0) {
  779 			if (retval == -ENOMEM)
  780 				DBG(ep->dev, "Out of DMA memory\n");
  781 			return retval;
  782 		}
  783 		if (ep->in) {
  784 			if (req->req.length == ep->ep.maxpacket) {
  785 				/* write tx bytes */
  786 				req->td_data->status =
  787 					AMD_ADDBITS(req->td_data->status,
  788 						ep->ep.maxpacket,
  789 						UDC_DMA_IN_STS_TXBYTES);
  790 
  791 			}
  792 		}
  793 
  794 	}
  795 
  796 	if (ep->in) {
  797 		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
  798 				"maxpacket=%d ep%d\n",
  799 				use_dma_ppb, req->req.length,
  800 				ep->ep.maxpacket, ep->num);
  801 		/*
  802 		 * if bytes < max packet then tx bytes must
  803 		 * be written in packet per buffer mode
  804 		 */
  805 		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
  806 				|| ep->num == UDC_EP0OUT_IX
  807 				|| ep->num == UDC_EP0IN_IX) {
  808 			/* write tx bytes */
  809 			req->td_data->status =
  810 				AMD_ADDBITS(req->td_data->status,
  811 						req->req.length,
  812 						UDC_DMA_IN_STS_TXBYTES);
  813 			/* reset frame num */
  814 			req->td_data->status =
  815 				AMD_ADDBITS(req->td_data->status,
  816 						0,
  817 						UDC_DMA_IN_STS_FRAMENUM);
  818 		}
  819 		/* set HOST BUSY */
  820 		req->td_data->status =
  821 			AMD_ADDBITS(req->td_data->status,
  822 				UDC_DMA_STP_STS_BS_HOST_BUSY,
  823 				UDC_DMA_STP_STS_BS);
  824 	} else {
  825 		VDBG(ep->dev, "OUT set host ready\n");
  826 		/* set HOST READY */
  827 		req->td_data->status =
  828 			AMD_ADDBITS(req->td_data->status,
  829 				UDC_DMA_STP_STS_BS_HOST_READY,
  830 				UDC_DMA_STP_STS_BS);
  831 
  832 
  833 		/* clear NAK by writing CNAK */
  834 		if (ep->naking) {
  835 			tmp = readl(&ep->regs->ctl);
  836 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
  837 			writel(tmp, &ep->regs->ctl);
  838 			ep->naking = 0;
  839 			UDC_QUEUE_CNAK(ep, ep->num);
  840 		}
  841 
  842 	}
  843 
  844 	return retval;
  845 }
  846 
  847 /* Completes request packet ... caller MUST hold lock */
  848 static void
  849 complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
  850 __releases(ep->dev->lock)
  851 __acquires(ep->dev->lock)
  852 {
  853 	struct udc		*dev;
  854 	unsigned		halted;
  855 
  856 	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);
  857 
  858 	dev = ep->dev;
  859 	/* unmap DMA */
  860 	if (ep->dma)
  861 		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);
  862 
  863 	halted = ep->halted;
  864 	ep->halted = 1;
  865 
  866 	/* set new status if pending */
  867 	if (req->req.status == -EINPROGRESS)
  868 		req->req.status = sts;
  869 
  870 	/* remove from ep queue */
  871 	list_del_init(&req->queue);
  872 
  873 	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
  874 		&req->req, req->req.length, ep->ep.name, sts);
  875 
  876 	spin_unlock(&dev->lock);
  877 	usb_gadget_giveback_request(&ep->ep, &req->req);
  878 	spin_lock(&dev->lock);
  879 	ep->halted = halted;
  880 }
  881 
  882 /* frees pci pool descriptors of a DMA chain */
  883 static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
  884 {
  885 
  886 	int ret_val = 0;
  887 	struct udc_data_dma	*td;
  888 	struct udc_data_dma	*td_last = NULL;
  889 	unsigned int i;
  890 
  891 	DBG(dev, "free chain req = %p\n", req);
  892 
  893 	/* do not free first desc.; it is freed together with the request */
  894 	td_last = req->td_data;
  895 	td = phys_to_virt(td_last->next);
  896 
  897 	for (i = 1; i < req->chain_len; i++) {
  898 
  899 		pci_pool_free(dev->data_requests, td,
  900 				(dma_addr_t) td_last->next);
  901 		td_last = td;
  902 		td = phys_to_virt(td_last->next);
  903 	}
  904 
  905 	return ret_val;
  906 }
  907 
  908 /* Iterates to the end of a DMA chain and returns last descriptor */
  909 static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
  910 {
  911 	struct udc_data_dma	*td;
  912 
  913 	td = req->td_data;
  914 	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
  915 		td = phys_to_virt(td->next);
  916 
  917 	return td;
  918 
  919 }
  920 
  921 /* Iterates to the end of a DMA chain and counts bytes received */
  922 static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
  923 {
  924 	struct udc_data_dma	*td;
  925 	u32 count;
  926 
  927 	td = req->td_data;
  928 	/* number of received bytes */
  929 	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);
  930 
  931 	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
  932 		td = phys_to_virt(td->next);
  933 		/* number of received bytes */
  934 		if (td) {
  935 			count += AMD_GETBITS(td->status,
  936 				UDC_DMA_OUT_STS_RXBYTES);
  937 		}
  938 	}
  939 
  940 	return count;
  941 
  942 }
  943 
  944 /* Creates or re-inits a DMA chain */
  945 static int udc_create_dma_chain(
  946 	struct udc_ep *ep,
  947 	struct udc_request *req,
  948 	unsigned long buf_len, gfp_t gfp_flags
  949 )
  950 {
  951 	unsigned long bytes = req->req.length;
  952 	unsigned int i;
  953 	dma_addr_t dma_addr;
  954 	struct udc_data_dma	*td = NULL;
  955 	struct udc_data_dma	*last = NULL;
  956 	unsigned long txbytes;
  957 	unsigned create_new_chain = 0;
  958 	unsigned len;
  959 
  960 	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
  961 			bytes, buf_len);
  962 	dma_addr = DMA_DONT_USE;
  963 
  964 	/* unset L bit in first desc for OUT */
  965 	if (!ep->in)
  966 		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
  967 
  968 	/* alloc only new desc's if not already available */
  969 	len = req->req.length / ep->ep.maxpacket;
  970 	if (req->req.length % ep->ep.maxpacket)
  971 		len++;
  972 
  973 	if (len > req->chain_len) {
  974 		/* shorter chain already allocated before */
  975 		if (req->chain_len > 1)
  976 			udc_free_dma_chain(ep->dev, req);
  977 		req->chain_len = len;
  978 		create_new_chain = 1;
  979 	}
  980 
  981 	td = req->td_data;
  982 	/* gen. required number of descriptors and buffers */
  983 	for (i = buf_len; i < bytes; i += buf_len) {
  984 		/* create or determine next desc. */
  985 		if (create_new_chain) {
  986 
  987 			td = pci_pool_alloc(ep->dev->data_requests,
  988 					gfp_flags, &dma_addr);
  989 			if (!td)
  990 				return -ENOMEM;
  991 
  992 			td->status = 0;
  993 		} else if (i == buf_len) {
  994 			/* first td */
  995 			td = (struct udc_data_dma *) phys_to_virt(
  996 						req->td_data->next);
  997 			td->status = 0;
  998 		} else {
  999 			td = (struct udc_data_dma *) phys_to_virt(last->next);
 1000 			td->status = 0;
 1001 		}
 1002 
 1003 
 1004 		if (td)
 1005 			td->bufptr = req->req.dma + i; /* assign buffer */
 1006 		else
 1007 			break;
 1008 
 1009 		/* short packet ? */
 1010 		if ((bytes - i) >= buf_len) {
 1011 			txbytes = buf_len;
 1012 		} else {
 1013 			/* short packet */
 1014 			txbytes = bytes - i;
 1015 		}
 1016 
 1017 		/* link td and assign tx bytes */
 1018 		if (i == buf_len) {
 1019 			if (create_new_chain)
 1020 				req->td_data->next = dma_addr;
 1021 			/*
 1022 			else
 1023 				req->td_data->next = virt_to_phys(td);
 1024 			*/
 1025 			/* write tx bytes */
 1026 			if (ep->in) {
 1027 				/* first desc */
 1028 				req->td_data->status =
 1029 					AMD_ADDBITS(req->td_data->status,
 1030 							ep->ep.maxpacket,
 1031 							UDC_DMA_IN_STS_TXBYTES);
 1032 				/* second desc */
 1033 				td->status = AMD_ADDBITS(td->status,
 1034 							txbytes,
 1035 							UDC_DMA_IN_STS_TXBYTES);
 1036 			}
 1037 		} else {
 1038 			if (create_new_chain)
 1039 				last->next = dma_addr;
 1040 			/*
 1041 			else
 1042 				last->next = virt_to_phys(td);
 1043 			*/
 1044 			if (ep->in) {
 1045 				/* write tx bytes */
 1046 				td->status = AMD_ADDBITS(td->status,
 1047 							txbytes,
 1048 							UDC_DMA_IN_STS_TXBYTES);
 1049 			}
 1050 		}
 1051 		last = td;
 1052 	}
 1053 	/* set last bit */
 1054 	if (td) {
 1055 		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
 1056 		/* last desc. points to itself */
 1057 		req->td_data_last = td;
 1058 	}
 1059 
 1060 	return 0;
 1061 }
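/*
 * Worked example (editorial): a 1536-byte request on a 512-byte bulk
 * endpoint needs len = 1536 / 512 = 3 descriptors. The first lives in
 * req->td_data; the loop above allocates (or reuses) and links the other
 * two, and only the last descriptor gets the L bit set.
 */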
 1062 
 1063 /* Enabling RX DMA */
 1064 static void udc_set_rde(struct udc *dev)
 1065 {
 1066 	u32 tmp;
 1067 
 1068 	VDBG(dev, "udc_set_rde()\n");
 1069 	/* stop RDE timer */
 1070 	if (timer_pending(&udc_timer)) {
 1071 		set_rde = 0;
 1072 		mod_timer(&udc_timer, jiffies - 1);
 1073 	}
 1074 	/* set RDE */
 1075 	tmp = readl(&dev->regs->ctl);
 1076 	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
 1077 	writel(tmp, &dev->regs->ctl);
 1078 }
 1079 
 1080 /* Queues a request packet, called by gadget driver */
 1081 static int
 1082 udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
 1083 {
 1084 	int			retval = 0;
 1085 	u8			open_rxfifo = 0;
 1086 	unsigned long		iflags;
 1087 	struct udc_ep		*ep;
 1088 	struct udc_request	*req;
 1089 	struct udc		*dev;
 1090 	u32			tmp;
 1091 
 1092 	/* check the inputs */
 1093 	req = container_of(usbreq, struct udc_request, req);
 1094 
 1095 	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
 1096 			|| !list_empty(&req->queue))
 1097 		return -EINVAL;
 1098 
 1099 	ep = container_of(usbep, struct udc_ep, ep);
 1100 	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
 1101 		return -EINVAL;
 1102 
 1103 	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
 1104 	dev = ep->dev;
 1105 
 1106 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 1107 		return -ESHUTDOWN;
 1108 
 1109 	/* map dma (usually done before) */
 1110 	if (ep->dma) {
 1111 		VDBG(dev, "DMA map req %p\n", req);
 1112 		retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
 1113 		if (retval)
 1114 			return retval;
 1115 	}
 1116 
 1117 	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
 1118 			usbep->name, usbreq, usbreq->length,
 1119 			req->td_data, usbreq->buf);
 1120 
 1121 	spin_lock_irqsave(&dev->lock, iflags);
 1122 	usbreq->actual = 0;
 1123 	usbreq->status = -EINPROGRESS;
 1124 	req->dma_done = 0;
 1125 
 1126 	/* on empty queue just do first transfer */
 1127 	if (list_empty(&ep->queue)) {
 1128 		/* zlp */
 1129 		if (usbreq->length == 0) {
 1130 			/* IN zlp's are handled by hardware */
 1131 			complete_req(ep, req, 0);
 1132 			VDBG(dev, "%s: zlp\n", ep->ep.name);
 1133 			/*
 1134 			 * if set_config or set_intf is waiting for ack by zlp
 1135 			 * then set CSR_DONE
 1136 			 */
 1137 			if (dev->set_cfg_not_acked) {
 1138 				tmp = readl(&dev->regs->ctl);
 1139 				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
 1140 				writel(tmp, &dev->regs->ctl);
 1141 				dev->set_cfg_not_acked = 0;
 1142 			}
 1143 			/* setup command is ACK'ed now by zlp */
 1144 			if (dev->waiting_zlp_ack_ep0in) {
 1145 				/* clear NAK by writing CNAK in EP0_IN */
 1146 				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 1147 				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 1148 				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 1149 				dev->ep[UDC_EP0IN_IX].naking = 0;
 1150 				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
 1151 							UDC_EP0IN_IX);
 1152 				dev->waiting_zlp_ack_ep0in = 0;
 1153 			}
 1154 			goto finished;
 1155 		}
 1156 		if (ep->dma) {
 1157 			retval = prep_dma(ep, req, GFP_ATOMIC);
 1158 			if (retval != 0)
 1159 				goto finished;
 1160 			/* write desc pointer to enable DMA */
 1161 			if (ep->in) {
 1162 				/* set HOST READY */
 1163 				req->td_data->status =
 1164 					AMD_ADDBITS(req->td_data->status,
 1165 						UDC_DMA_IN_STS_BS_HOST_READY,
 1166 						UDC_DMA_IN_STS_BS);
 1167 			}
 1168 
 1169 			/* disable rx dma while the descriptor is updated */
 1170 			if (!ep->in) {
 1171 				/* stop RDE timer */
 1172 				if (timer_pending(&udc_timer)) {
 1173 					set_rde = 0;
 1174 					mod_timer(&udc_timer, jiffies - 1);
 1175 				}
 1176 				/* clear RDE */
 1177 				tmp = readl(&dev->regs->ctl);
 1178 				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
 1179 				writel(tmp, &dev->regs->ctl);
 1180 				open_rxfifo = 1;
 1181 
 1182 				/*
 1183 				 * if BNA occurred then let BNA dummy desc.
 1184 				 * point to current desc.
 1185 				 */
 1186 				if (ep->bna_occurred) {
 1187 					VDBG(dev, "copy to BNA dummy desc.\n");
 1188 					memcpy(ep->bna_dummy_req->td_data,
 1189 						req->td_data,
 1190 						sizeof(struct udc_data_dma));
 1191 				}
 1192 			}
 1193 			/* write desc pointer */
 1194 			writel(req->td_phys, &ep->regs->desptr);
 1195 
 1196 			/* clear NAK by writing CNAK */
 1197 			if (ep->naking) {
 1198 				tmp = readl(&ep->regs->ctl);
 1199 				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 1200 				writel(tmp, &ep->regs->ctl);
 1201 				ep->naking = 0;
 1202 				UDC_QUEUE_CNAK(ep, ep->num);
 1203 			}
 1204 
 1205 			if (ep->in) {
 1206 				/* enable ep irq */
 1207 				tmp = readl(&dev->regs->ep_irqmsk);
 1208 				tmp &= AMD_UNMASK_BIT(ep->num);
 1209 				writel(tmp, &dev->regs->ep_irqmsk);
 1210 			}
 1211 		} else if (ep->in) {
 1212 				/* enable ep irq */
 1213 				tmp = readl(&dev->regs->ep_irqmsk);
 1214 				tmp &= AMD_UNMASK_BIT(ep->num);
 1215 				writel(tmp, &dev->regs->ep_irqmsk);
 1216 			}
 1217 
 1218 	} else if (ep->dma) {
 1219 
 1220 		/*
 1221 		 * prep_dma is not used for OUT ep's here; that is not possible
 1222 		 * in PPB modes because of how the DMA chain is created
 1223 		 */
 1224 		if (ep->in) {
 1225 			retval = prep_dma(ep, req, GFP_ATOMIC);
 1226 			if (retval != 0)
 1227 				goto finished;
 1228 		}
 1229 	}
 1230 	VDBG(dev, "list_add\n");
 1231 	/* add request to ep queue */
 1232 	if (req) {
 1233 
 1234 		list_add_tail(&req->queue, &ep->queue);
 1235 
 1236 		/* open rxfifo if out data queued */
 1237 		if (open_rxfifo) {
 1238 			/* enable DMA */
 1239 			req->dma_going = 1;
 1240 			udc_set_rde(dev);
 1241 			if (ep->num != UDC_EP0OUT_IX)
 1242 				dev->data_ep_queued = 1;
 1243 		}
 1244 		/* stop OUT naking */
 1245 		if (!ep->in) {
 1246 			if (!use_dma && udc_rxfifo_pending) {
 1247 				DBG(dev, "udc_queue(): pending bytes in "
 1248 					"rxfifo after nyet\n");
 1249 				/*
 1250 				 * read pending bytes after nyet:
 1251 				 * referring to isr
 1252 				 */
 1253 				if (udc_rxfifo_read(ep, req)) {
 1254 					/* finish */
 1255 					complete_req(ep, req, 0);
 1256 				}
 1257 				udc_rxfifo_pending = 0;
 1258 
 1259 			}
 1260 		}
 1261 	}
 1262 
 1263 finished:
 1264 	spin_unlock_irqrestore(&dev->lock, iflags);
 1265 	return retval;
 1266 }
 1267 
 1268 /* Empty request queue of an endpoint; caller holds spinlock */
 1269 static void empty_req_queue(struct udc_ep *ep)
 1270 {
 1271 	struct udc_request	*req;
 1272 
 1273 	ep->halted = 1;
 1274 	while (!list_empty(&ep->queue)) {
 1275 		req = list_entry(ep->queue.next,
 1276 			struct udc_request,
 1277 			queue);
 1278 		complete_req(ep, req, -ESHUTDOWN);
 1279 	}
 1280 }
 1281 
 1282 /* Dequeues a request packet, called by gadget driver */
 1283 static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
 1284 {
 1285 	struct udc_ep		*ep;
 1286 	struct udc_request	*req;
 1287 	unsigned		halted;
 1288 	unsigned long		iflags;
 1289 
 1290 	ep = container_of(usbep, struct udc_ep, ep);
 1291 	if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
 1292 				&& ep->num != UDC_EP0OUT_IX)))
 1293 		return -EINVAL;
 1294 
 1295 	req = container_of(usbreq, struct udc_request, req);
 1296 
 1297 	spin_lock_irqsave(&ep->dev->lock, iflags);
 1298 	halted = ep->halted;
 1299 	ep->halted = 1;
 1300 	/* request is being processed or is the next one queued */
 1301 	if (ep->queue.next == &req->queue) {
 1302 		if (ep->dma && req->dma_going) {
 1303 			if (ep->in)
 1304 				ep->cancel_transfer = 1;
 1305 			else {
 1306 				u32 tmp;
 1307 				u32 dma_sts;
 1308 				/* stop potential receive DMA */
 1309 				tmp = readl(&udc->regs->ctl);
 1310 				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
 1311 							&udc->regs->ctl);
 1312 				/*
 1313 				 * Cancel transfer later in ISR
 1314 				 * if descriptor was touched.
 1315 				 */
 1316 				dma_sts = AMD_GETBITS(req->td_data->status,
 1317 							UDC_DMA_OUT_STS_BS);
 1318 				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
 1319 					ep->cancel_transfer = 1;
 1320 				else {
 1321 					udc_init_bna_dummy(ep->req);
 1322 					writel(ep->bna_dummy_req->td_phys,
 1323 						&ep->regs->desptr);
 1324 				}
 1325 				writel(tmp, &udc->regs->ctl);
 1326 			}
 1327 		}
 1328 	}
 1329 	complete_req(ep, req, -ECONNRESET);
 1330 	ep->halted = halted;
 1331 
 1332 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
 1333 	return 0;
 1334 }
 1335 
 1336 /* Halt or clear halt of endpoint */
 1337 static int
 1338 udc_set_halt(struct usb_ep *usbep, int halt)
 1339 {
 1340 	struct udc_ep	*ep;
 1341 	u32 tmp;
 1342 	unsigned long iflags;
 1343 	int retval = 0;
 1344 
 1345 	if (!usbep)
 1346 		return -EINVAL;
 1347 
 1348 	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);
 1349 
 1350 	ep = container_of(usbep, struct udc_ep, ep);
 1351 	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
 1352 		return -EINVAL;
 1353 	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
 1354 		return -ESHUTDOWN;
 1355 
 1356 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
 1357 	/* halt or clear halt */
 1358 	if (halt) {
 1359 		if (ep->num == 0)
 1360 			ep->dev->stall_ep0in = 1;
 1361 		else {
 1362 			/*
 1363 			 * set STALL; whether the rxfifo is empty
 1364 			 * is not taken into account
 1365 			 */
 1366 			tmp = readl(&ep->regs->ctl);
 1367 			tmp |= AMD_BIT(UDC_EPCTL_S);
 1368 			writel(tmp, &ep->regs->ctl);
 1369 			ep->halted = 1;
 1370 
 1371 			/* setup poll timer */
 1372 			if (!timer_pending(&udc_pollstall_timer)) {
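      				/* convert UDC_POLLSTALL_TIMER_USECONDS from µs to jiffies */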
 1373 				udc_pollstall_timer.expires = jiffies +
 1374 					HZ * UDC_POLLSTALL_TIMER_USECONDS
 1375 					/ (1000 * 1000);
 1376 				if (!stop_pollstall_timer) {
 1377 					DBG(ep->dev, "start polltimer\n");
 1378 					add_timer(&udc_pollstall_timer);
 1379 				}
 1380 			}
 1381 		}
 1382 	} else {
 1383 		/* ep was halted by set_halt() before */
 1384 		if (ep->halted) {
 1385 			tmp = readl(&ep->regs->ctl);
 1386 			/* clear stall bit */
 1387 			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
 1388 			/* clear NAK by writing CNAK */
 1389 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 1390 			writel(tmp, &ep->regs->ctl);
 1391 			ep->halted = 0;
 1392 			UDC_QUEUE_CNAK(ep, ep->num);
 1393 		}
 1394 	}
 1395 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
 1396 	return retval;
 1397 }
 1398 
 1399 /* gadget interface */
 1400 static const struct usb_ep_ops udc_ep_ops = {
 1401 	.enable		= udc_ep_enable,
 1402 	.disable	= udc_ep_disable,
 1403 
 1404 	.alloc_request	= udc_alloc_request,
 1405 	.free_request	= udc_free_request,
 1406 
 1407 	.queue		= udc_queue,
 1408 	.dequeue	= udc_dequeue,
 1409 
 1410 	.set_halt	= udc_set_halt,
 1411 	/* fifo ops not implemented */
 1412 };
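
/*
 * Hypothetical gadget-side usage of these ops via the usb_ep API
 * (a sketch only; buf, len and my_complete are illustrative names):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);	/+ dispatches to udc_queue() +/
 */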
 1413 
 1414 /*-------------------------------------------------------------------------*/
 1415 
 1416 /* Get frame counter (not implemented) */
 1417 static int udc_get_frame(struct usb_gadget *gadget)
 1418 {
 1419 	return -EOPNOTSUPP;
 1420 }
 1421 
 1422 /* Remote wakeup gadget interface */
 1423 static int udc_wakeup(struct usb_gadget *gadget)
 1424 {
 1425 	struct udc		*dev;
 1426 
 1427 	if (!gadget)
 1428 		return -EINVAL;
 1429 	dev = container_of(gadget, struct udc, gadget);
 1430 	udc_remote_wakeup(dev);
 1431 
 1432 	return 0;
 1433 }
 1434 
 1435 static int amd5536_udc_start(struct usb_gadget *g,
 1436 		struct usb_gadget_driver *driver);
 1437 static int amd5536_udc_stop(struct usb_gadget *g);
 1438 
 1439 static const struct usb_gadget_ops udc_ops = {
 1440 	.wakeup		= udc_wakeup,
 1441 	.get_frame	= udc_get_frame,
 1442 	.udc_start	= amd5536_udc_start,
 1443 	.udc_stop	= amd5536_udc_stop,
 1444 };
 1445 
 1446 /* Sets up endpoint parameters, adds endpoints to the linked list */
 1447 static void make_ep_lists(struct udc *dev)
 1448 {
 1449 	/* make gadget ep lists */
 1450 	INIT_LIST_HEAD(&dev->gadget.ep_list);
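      	/* note: ep0 in/out are not listed here; they are exposed via gadget.ep0 */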
 1451 	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
 1452 						&dev->gadget.ep_list);
 1453 	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
 1454 						&dev->gadget.ep_list);
 1455 	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
 1456 						&dev->gadget.ep_list);
 1457 
 1458 	/* fifo config */
 1459 	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
 1460 	if (dev->gadget.speed == USB_SPEED_FULL)
 1461 		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
 1462 	else if (dev->gadget.speed == USB_SPEED_HIGH)
 1463 		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
 1464 	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
 1465 }
 1466 
 1467 /* init registers at driver load time */
 1468 static int startup_registers(struct udc *dev)
 1469 {
 1470 	u32 tmp;
 1471 
 1472 	/* init controller by soft reset */
 1473 	udc_soft_reset(dev);
 1474 
 1475 	/* mask interrupts that are not needed */
 1476 	udc_mask_unused_interrupts(dev);
 1477 
 1478 	/* put into initial config */
 1479 	udc_basic_init(dev);
 1480 	/* link up all endpoints */
 1481 	udc_setup_endpoints(dev);
 1482 
 1483 	/* program speed */
 1484 	tmp = readl(&dev->regs->cfg);
 1485 	if (use_fullspeed)
 1486 		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
 1487 	else
 1488 		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
 1489 	writel(tmp, &dev->regs->cfg);
 1490 
 1491 	return 0;
 1492 }
 1493 
 1494 /* Inits UDC context */
 1495 static void udc_basic_init(struct udc *dev)
 1496 {
 1497 	u32	tmp;
 1498 
 1499 	DBG(dev, "udc_basic_init()\n");
 1500 
 1501 	dev->gadget.speed = USB_SPEED_UNKNOWN;
 1502 
 1503 	/* stop RDE timer */
 1504 	if (timer_pending(&udc_timer)) {
 1505 		set_rde = 0;
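      		/* an expiry time in the past makes the timer fire immediately */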
 1506 		mod_timer(&udc_timer, jiffies - 1);
 1507 	}
 1508 	/* stop poll stall timer */
 1509 	if (timer_pending(&udc_pollstall_timer))
 1510 		mod_timer(&udc_pollstall_timer, jiffies - 1);
 1511 	/* disable DMA */
 1512 	tmp = readl(&dev->regs->ctl);
 1513 	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
 1514 	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
 1515 	writel(tmp, &dev->regs->ctl);
 1516 
 1517 	/* enable dynamic CSR programming */
 1518 	tmp = readl(&dev->regs->cfg);
 1519 	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
 1520 	/* set self powered */
 1521 	tmp |= AMD_BIT(UDC_DEVCFG_SP);
 1522 	/* set remote-wakeup capable */
 1523 	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
 1524 	writel(tmp, &dev->regs->cfg);
 1525 
 1526 	make_ep_lists(dev);
 1527 
 1528 	dev->data_ep_enabled = 0;
 1529 	dev->data_ep_queued = 0;
 1530 }
 1531 
 1532 /* Sets initial endpoint parameters */
 1533 static void udc_setup_endpoints(struct udc *dev)
 1534 {
 1535 	struct udc_ep	*ep;
 1536 	u32	tmp;
 1537 	u32	reg;
 1538 
 1539 	DBG(dev, "udc_setup_endpoints()\n");
 1540 
 1541 	/* read enum speed */
 1542 	tmp = readl(&dev->regs->sts);
 1543 	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
 1544 	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
 1545 		dev->gadget.speed = USB_SPEED_HIGH;
 1546 	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
 1547 		dev->gadget.speed = USB_SPEED_FULL;
 1548 
 1549 	/* set basic ep parameters */
 1550 	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
 1551 		ep = &dev->ep[tmp];
 1552 		ep->dev = dev;
 1553 		ep->ep.name = ep_string[tmp];
 1554 		ep->num = tmp;
 1555 		/* txfifo size is calculated at enable time */
 1556 		ep->txfifo = dev->txfifo;
 1557 
 1558 		/* fifo size */
 1559 		if (tmp < UDC_EPIN_NUM) {
 1560 			ep->fifo_depth = UDC_TXFIFO_SIZE;
 1561 			ep->in = 1;
 1562 		} else {
 1563 			ep->fifo_depth = UDC_RXFIFO_SIZE;
 1564 			ep->in = 0;
 1565 
 1566 		}
 1567 		ep->regs = &dev->ep_regs[tmp];
 1568 		/*
 1569 		 * ep will be reset only if ep was not enabled before to avoid
 1570 		 * disabling ep interrupts when ENUM interrupt occurs but ep is
 1571 		 * not enabled by gadget driver
 1572 		 */
 1573 		if (!ep->ep.desc)
 1574 			ep_init(dev->regs, ep);
 1575 
 1576 		if (use_dma) {
 1577 			/*
 1578 			 * ep->dma is not really used as DMA regs; it only
 1579 			 * indicates that DMA is active and points to the
 1580 			 * device control regs
 1581 			 */
 1582 			ep->dma = &dev->regs->ctl;
 1583 
 1584 			/* nak OUT endpoints until enable - not for ep0 */
 1585 			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
 1586 						&& tmp > UDC_EPIN_NUM) {
 1587 				/* set NAK */
 1588 				reg = readl(&dev->ep[tmp].regs->ctl);
 1589 				reg |= AMD_BIT(UDC_EPCTL_SNAK);
 1590 				writel(reg, &dev->ep[tmp].regs->ctl);
 1591 				dev->ep[tmp].naking = 1;
 1592 
 1593 			}
 1594 		}
 1595 	}
 1596 	/* EP0 max packet */
 1597 	if (dev->gadget.speed == USB_SPEED_FULL) {
 1598 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
 1599 					   UDC_FS_EP0IN_MAX_PKT_SIZE);
 1600 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
 1601 					   UDC_FS_EP0OUT_MAX_PKT_SIZE);
 1602 	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
 1603 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
 1604 					   UDC_EP0IN_MAX_PKT_SIZE);
 1605 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
 1606 					   UDC_EP0OUT_MAX_PKT_SIZE);
 1607 	}
 1608 
 1609 	/*
 1610 	 * with suspend bug workaround, ep0 params for gadget driver
 1611 	 * are set at gadget driver bind() call
 1612 	 */
 1613 	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
 1614 	dev->ep[UDC_EP0IN_IX].halted = 0;
 1615 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
 1616 
 1617 	/* init cfg/alt/int */
 1618 	dev->cur_config = 0;
 1619 	dev->cur_intf = 0;
 1620 	dev->cur_alt = 0;
 1621 }
 1622 
 1623 /* Bringup after Connect event, initial bringup to be ready for ep0 events */
 1624 static void usb_connect(struct udc *dev)
 1625 {
 1626 
 1627 	dev_info(&dev->pdev->dev, "USB Connect\n");
 1628 
 1629 	dev->connected = 1;
 1630 
 1631 	/* put into initial config */
 1632 	udc_basic_init(dev);
 1633 
 1634 	/* enable device setup interrupts */
 1635 	udc_enable_dev_setup_interrupts(dev);
 1636 }
 1637 
 1638 /*
 1639  * Calls gadget with disconnect event, resets the UDC, and performs
 1640  * the initial bringup to be ready for ep0 events
 1641  */
 1642 static void usb_disconnect(struct udc *dev)
 1643 {
 1644 
 1645 	dev_info(&dev->pdev->dev, "USB Disconnect\n");
 1646 
 1647 	dev->connected = 0;
 1648 
 1649 	/* mask interrupts */
 1650 	udc_mask_unused_interrupts(dev);
 1651 
 1652 	/* REVISIT there doesn't seem to be a point to having this
 1653 	 * talk to a tasklet ... do it directly, we already hold
 1654 	 * the spinlock needed to process the disconnect.
 1655 	 */
 1656 
 1657 	tasklet_schedule(&disconnect_tasklet);
 1658 }
 1659 
 1660 /* Tasklet for disconnect to be outside of interrupt context */
 1661 static void udc_tasklet_disconnect(unsigned long par)
 1662 {
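      	/* par holds the address of a struct udc pointer, hence the double deref */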
 1663 	struct udc *dev = (struct udc *)(*((struct udc **) par));
 1664 	u32 tmp;
 1665 
 1666 	DBG(dev, "Tasklet disconnect\n");
 1667 	spin_lock_irq(&dev->lock);
 1668 
 1669 	if (dev->driver) {
 1670 		spin_unlock(&dev->lock);
 1671 		dev->driver->disconnect(&dev->gadget);
 1672 		spin_lock(&dev->lock);
 1673 
 1674 		/* empty queues */
 1675 		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
 1676 			empty_req_queue(&dev->ep[tmp]);
 1677 
 1678 	}
 1679 
 1680 	/* disable ep0 */
 1681 	ep_init(dev->regs,
 1682 			&dev->ep[UDC_EP0IN_IX]);
 1683 
 1685 	if (!soft_reset_occured) {
 1686 		/* init controller by soft reset */
 1687 		udc_soft_reset(dev);
 1688 		soft_reset_occured++;
 1689 	}
 1690 
 1691 	/* re-enable dev interrupts */
 1692 	udc_enable_dev_setup_interrupts(dev);
 1693 	/* back to full speed ? */
 1694 	if (use_fullspeed) {
 1695 		tmp = readl(&dev->regs->cfg);
 1696 		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
 1697 		writel(tmp, &dev->regs->cfg);
 1698 	}
 1699 
 1700 	spin_unlock_irq(&dev->lock);
 1701 }
 1702 
 1703 /* Reset the UDC core */
 1704 static void udc_soft_reset(struct udc *dev)
 1705 {
 1706 	unsigned long	flags;
 1707 
 1708 	DBG(dev, "Soft reset\n");
 1709 	/*
 1710 	 * clear possibly pending interrupts, because interrupt
 1711 	 * status is lost after soft reset;
 1712 	 * ep interrupt status is reset first
 1713 	 */
 1714 	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
 1715 	/* device int. status reset */
 1716 	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
 1717 
 1718 	spin_lock_irqsave(&udc_irq_spinlock, flags);
 1719 	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
 1720 	readl(&dev->regs->cfg);
 1721 	spin_unlock_irqrestore(&udc_irq_spinlock, flags);
 1722 
 1723 }
 1724 
 1725 /* RDE timer callback to set RDE bit */
 1726 static void udc_timer_function(unsigned long v)
 1727 {
 1728 	u32 tmp;
 1729 
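      	/*
      	 * set_rde states as used below: <= 0 means idle or RDE already
      	 * set, 1 means poll the rxfifo, > 1 means open the fifo now
      	 */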
 1730 	spin_lock_irq(&udc_irq_spinlock);
 1731 
 1732 	if (set_rde > 0) {
 1733 		/*
 1734 		 * conditionally open the fifo if it was already
 1735 		 * filled on the last timer call
 1736 		 */
 1737 		if (set_rde > 1) {
 1738 			/* set RDE to receive setup data */
 1739 			tmp = readl(&udc->regs->ctl);
 1740 			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
 1741 			writel(tmp, &udc->regs->ctl);
 1742 			set_rde = -1;
 1743 		} else if (readl(&udc->regs->sts)
 1744 				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
 1745 			/*
 1746 			 * if the fifo is empty, set up polling instead
 1747 			 * of just opening the fifo
 1748 			 */
 1749 			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
 1750 			if (!stop_timer)
 1751 				add_timer(&udc_timer);
 1752 		} else {
 1753 			/*
 1754 			 * fifo contains data now: set up a timer to open
 1755 			 * the fifo on expiry so setup packets can still be
 1756 			 * received; when data packets get queued by the
 1757 			 * gadget layer, the timer is forced to expire with
 1758 			 * set_rde=0 (RDE is set in udc_queue())
 1759 			 */
 1760 			set_rde++;
 1761 			/* debug: lhadmot_timer_start = 221070 */
 1762 			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
 1763 			if (!stop_timer)
 1764 				add_timer(&udc_timer);
 1765 		}
 1766 
 1767 	} else
 1768 		set_rde = -1; /* RDE was set by udc_queue() */
 1769 	spin_unlock_irq(&udc_irq_spinlock);
 1770 	if (stop_timer)
 1771 		complete(&on_exit);
 1772 
 1773 }
 1774 
 1775 /* Handle halt state, used in stall poll timer */
 1776 static void udc_handle_halt_state(struct udc_ep *ep)
 1777 {
 1778 	u32 tmp;
 1779 	/* while marked halted, check whether the host cleared the STALL bit */
 1780 	if (ep->halted == 1) {
 1781 		tmp = readl(&ep->regs->ctl);
 1782 		/* STALL cleared ? */
 1783 		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
 1784 			/*
 1785 			 * FIXME: MSC spec requires that stall remains
 1786 			 * even on receiving CLEAR_FEATURE HALT. So
 1787 			 * we would set STALL again here to be compliant.
 1788 			 * But with current mass storage drivers this does
 1789 			 * not work (would produce endless host retries).
 1790 			 * So we clear halt on CLEAR_FEATURE.
 1791 			 *
 1792 			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
 1793 			tmp |= AMD_BIT(UDC_EPCTL_S);
 1794 			writel(tmp, &ep->regs->ctl);*/
 1795 
 1796 			/* clear NAK by writing CNAK */
 1797 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 1798 			writel(tmp, &ep->regs->ctl);
 1799 			ep->halted = 0;
 1800 			UDC_QUEUE_CNAK(ep, ep->num);
 1801 		}
 1802 	}
 1803 }
 1804 
 1805 /* Stall poll timer callback: polls the S bit and re-arms itself */
 1806 static void udc_pollstall_timer_function(unsigned long v)
 1807 {
 1808 	struct udc_ep *ep;
 1809 	int halted = 0;
 1810 
 1811 	spin_lock_irq(&udc_stall_spinlock);
 1812 	/*
 1813 	 * only one IN and one OUT endpoint are handled;
 1814 	 * IN poll stall first
 1815 	 */
 1816 	ep = &udc->ep[UDC_EPIN_IX];
 1817 	udc_handle_halt_state(ep);
 1818 	if (ep->halted)
 1819 		halted = 1;
 1820 	/* OUT poll stall */
 1821 	ep = &udc->ep[UDC_EPOUT_IX];
 1822 	udc_handle_halt_state(ep);
 1823 	if (ep->halted)
 1824 		halted = 1;
 1825 
 1826 	/* setup timer again when still halted */
 1827 	if (!stop_pollstall_timer && halted) {
 1828 		udc_pollstall_timer.expires = jiffies +
 1829 					HZ * UDC_POLLSTALL_TIMER_USECONDS
 1830 					/ (1000 * 1000);
 1831 		add_timer(&udc_pollstall_timer);
 1832 	}
 1833 	spin_unlock_irq(&udc_stall_spinlock);
 1834 
 1835 	if (stop_pollstall_timer)
 1836 		complete(&on_pollstall_exit);
 1837 }
 1838 
 1839 /* Inits endpoint 0 so that SETUP packets are processed */
 1840 static void activate_control_endpoints(struct udc *dev)
 1841 {
 1842 	u32 tmp;
 1843 
 1844 	DBG(dev, "activate_control_endpoints\n");
 1845 
 1846 	/* flush fifo */
 1847 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 1848 	tmp |= AMD_BIT(UDC_EPCTL_F);
 1849 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 1850 
 1851 	/* set ep0 directions */
 1852 	dev->ep[UDC_EP0IN_IX].in = 1;
 1853 	dev->ep[UDC_EP0OUT_IX].in = 0;
 1854 
 1855 	/* set buffer size (tx fifo entries) of EP0_IN */
 1856 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
 1857 	if (dev->gadget.speed == USB_SPEED_FULL)
 1858 		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
 1859 					UDC_EPIN_BUFF_SIZE);
 1860 	else if (dev->gadget.speed == USB_SPEED_HIGH)
 1861 		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
 1862 					UDC_EPIN_BUFF_SIZE);
 1863 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
 1864 
 1865 	/* set max packet size of EP0_IN */
 1866 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
 1867 	if (dev->gadget.speed == USB_SPEED_FULL)
 1868 		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
 1869 					UDC_EP_MAX_PKT_SIZE);
 1870 	else if (dev->gadget.speed == USB_SPEED_HIGH)
 1871 		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
 1872 				UDC_EP_MAX_PKT_SIZE);
 1873 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
 1874 
 1875 	/* set max packet size of EP0_OUT */
 1876 	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
 1877 	if (dev->gadget.speed == USB_SPEED_FULL)
 1878 		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
 1879 					UDC_EP_MAX_PKT_SIZE);
 1880 	else if (dev->gadget.speed == USB_SPEED_HIGH)
 1881 		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
 1882 					UDC_EP_MAX_PKT_SIZE);
 1883 	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
 1884 
 1885 	/* set max packet size of EP0 in UDC CSR */
 1886 	tmp = readl(&dev->csr->ne[0]);
 1887 	if (dev->gadget.speed == USB_SPEED_FULL)
 1888 		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
 1889 					UDC_CSR_NE_MAX_PKT);
 1890 	else if (dev->gadget.speed == USB_SPEED_HIGH)
 1891 		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
 1892 					UDC_CSR_NE_MAX_PKT);
 1893 	writel(tmp, &dev->csr->ne[0]);
 1894 
 1895 	if (use_dma) {
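      		/* the L bit marks the last descriptor of a chain */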
 1896 		dev->ep[UDC_EP0OUT_IX].td->status |=
 1897 			AMD_BIT(UDC_DMA_OUT_STS_L);
 1898 		/* write dma desc address */
 1899 		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
 1900 			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
 1901 		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
 1902 			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
 1903 		/* stop RDE timer */
 1904 		if (timer_pending(&udc_timer)) {
 1905 			set_rde = 0;
 1906 			mod_timer(&udc_timer, jiffies - 1);
 1907 		}
 1908 		/* stop pollstall timer */
 1909 		if (timer_pending(&udc_pollstall_timer))
 1910 			mod_timer(&udc_pollstall_timer, jiffies - 1);
 1911 		/* enable DMA */
 1912 		tmp = readl(&dev->regs->ctl);
 1913 		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
 1914 				| AMD_BIT(UDC_DEVCTL_RDE)
 1915 				| AMD_BIT(UDC_DEVCTL_TDE);
 1916 		if (use_dma_bufferfill_mode)
 1917 			tmp |= AMD_BIT(UDC_DEVCTL_BF);
 1918 		else if (use_dma_ppb_du)
 1919 			tmp |= AMD_BIT(UDC_DEVCTL_DU);
 1920 		writel(tmp, &dev->regs->ctl);
 1921 	}
 1922 
 1923 	/* clear NAK by writing CNAK for EP0IN */
 1924 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 1925 	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 1926 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 1927 	dev->ep[UDC_EP0IN_IX].naking = 0;
 1928 	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
 1929 
 1930 	/* clear NAK by writing CNAK for EP0OUT */
 1931 	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
 1932 	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 1933 	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
 1934 	dev->ep[UDC_EP0OUT_IX].naking = 0;
 1935 	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
 1936 }
 1937 
 1938 /* Make endpoint 0 ready for control traffic */
 1939 static int setup_ep0(struct udc *dev)
 1940 {
 1941 	activate_control_endpoints(dev);
 1942 	/* enable ep0 interrupts */
 1943 	udc_enable_ep0_interrupts(dev);
 1944 	/* enable device setup interrupts */
 1945 	udc_enable_dev_setup_interrupts(dev);
 1946 
 1947 	return 0;
 1948 }
 1949 
 1950 /* Called by gadget driver to register itself */
 1951 static int amd5536_udc_start(struct usb_gadget *g,
 1952 		struct usb_gadget_driver *driver)
 1953 {
 1954 	struct udc *dev = to_amd5536_udc(g);
 1955 	u32 tmp;
 1956 
 1957 	driver->driver.bus = NULL;
 1958 	dev->driver = driver;
 1959 
 1960 	/* Some gadget drivers use both ep0 directions.
 1961 	 * NOTE: to gadget driver, ep0 is just one endpoint...
 1962 	 */
 1963 	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
 1964 		dev->ep[UDC_EP0IN_IX].ep.driver_data;
 1965 
 1966 	/* get ready for ep0 traffic */
 1967 	setup_ep0(dev);
 1968 
 1969 	/* clear SD (soft disconnect) to connect to the host */
 1970 	tmp = readl(&dev->regs->ctl);
 1971 	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
 1972 	writel(tmp, &dev->regs->ctl);
 1973 
 1974 	usb_connect(dev);
 1975 
 1976 	return 0;
 1977 }
 1978 
 1979 /* shutdown requests and disconnect from gadget */
 1980 static void
 1981 shutdown(struct udc *dev, struct usb_gadget_driver *driver)
 1982 __releases(dev->lock)
 1983 __acquires(dev->lock)
 1984 {
 1985 	int tmp;
 1986 
 1987 	/* empty queues and init hardware */
 1988 	udc_basic_init(dev);
 1989 
 1990 	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
 1991 		empty_req_queue(&dev->ep[tmp]);
 1992 
 1993 	udc_setup_endpoints(dev);
 1994 }
 1995 
 1996 /* Called by gadget driver to unregister itself */
 1997 static int amd5536_udc_stop(struct usb_gadget *g)
 1998 {
 1999 	struct udc *dev = to_amd5536_udc(g);
 2000 	unsigned long flags;
 2001 	u32 tmp;
 2002 
 2003 	spin_lock_irqsave(&dev->lock, flags);
 2004 	udc_mask_unused_interrupts(dev);
 2005 	shutdown(dev, NULL);
 2006 	spin_unlock_irqrestore(&dev->lock, flags);
 2007 
 2008 	dev->driver = NULL;
 2009 
 2010 	/* set SD (soft disconnect) */
 2011 	tmp = readl(&dev->regs->ctl);
 2012 	tmp |= AMD_BIT(UDC_DEVCTL_SD);
 2013 	writel(tmp, &dev->regs->ctl);
 2014 
 2015 	return 0;
 2016 }
 2017 
 2018 /* Clear pending NAK bits */
 2019 static void udc_process_cnak_queue(struct udc *dev)
 2020 {
 2021 	u32 tmp;
 2022 	u32 reg;
 2023 
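      	/* cnak_pending is a bitmask: bit n set means ep n waits for CNAK */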
 2024 	/* check epin's */
 2025 	DBG(dev, "CNAK pending queue processing\n");
 2026 	for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
 2027 		if (cnak_pending & (1 << tmp)) {
 2028 			DBG(dev, "CNAK pending for ep%d\n", tmp);
 2029 			/* clear NAK by writing CNAK */
 2030 			reg = readl(&dev->ep[tmp].regs->ctl);
 2031 			reg |= AMD_BIT(UDC_EPCTL_CNAK);
 2032 			writel(reg, &dev->ep[tmp].regs->ctl);
 2033 			dev->ep[tmp].naking = 0;
 2034 			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
 2035 		}
 2036 	}
 2037 	/* ... and ep0out */
 2038 	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
 2039 		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
 2040 		/* clear NAK by writing CNAK */
 2041 		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
 2042 		reg |= AMD_BIT(UDC_EPCTL_CNAK);
 2043 		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
 2044 		dev->ep[UDC_EP0OUT_IX].naking = 0;
 2045 		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
 2046 				dev->ep[UDC_EP0OUT_IX].num);
 2047 	}
 2048 }
 2049 
 2050 /* Enable RX DMA after a setup packet */
 2051 static void udc_ep0_set_rde(struct udc *dev)
 2052 {
 2053 	if (use_dma) {
 2054 		/*
 2055 		 * only enable RXDMA when no data endpoint enabled
 2056 		 * or data is queued
 2057 		 */
 2058 		if (!dev->data_ep_enabled || dev->data_ep_queued) {
 2059 			udc_set_rde(dev);
 2060 		} else {
 2061 			/*
 2062 			 * set up a timer for enabling RDE (to not enable
 2063 			 * RXFIFO DMA for data endpoints too early)
 2064 			 */
 2065 			if (set_rde != 0 && !timer_pending(&udc_timer)) {
 2066 				udc_timer.expires =
 2067 					jiffies + HZ/UDC_RDE_TIMER_DIV;
 2068 				set_rde = 1;
 2069 				if (!stop_timer)
 2070 					add_timer(&udc_timer);
 2071 			}
 2072 		}
 2073 	}
 2074 }
 2075 
 2077 /* Interrupt handler for data OUT traffic */
 2078 static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
 2079 {
 2080 	irqreturn_t		ret_val = IRQ_NONE;
 2081 	u32			tmp;
 2082 	struct udc_ep		*ep;
 2083 	struct udc_request	*req;
 2084 	unsigned int		count;
 2085 	struct udc_data_dma	*td = NULL;
 2086 	unsigned		dma_done;
 2087 
 2088 	VDBG(dev, "ep%d irq\n", ep_ix);
 2089 	ep = &dev->ep[ep_ix];
 2090 
 2091 	tmp = readl(&ep->regs->sts);
 2092 	if (use_dma) {
 2093 		/* BNA (buffer not available) event ? */
 2094 		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
 2095 			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
 2096 					ep->num, readl(&ep->regs->desptr));
 2097 			/* clear BNA */
 2098 			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
 2099 			if (!ep->cancel_transfer)
 2100 				ep->bna_occurred = 1;
 2101 			else
 2102 				ep->cancel_transfer = 0;
 2103 			ret_val = IRQ_HANDLED;
 2104 			goto finished;
 2105 		}
 2106 	}
 2107 	/* HE (host error) event ? */
 2108 	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
 2109 		dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
 2110 
 2111 		/* clear HE */
 2112 		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
 2113 		ret_val = IRQ_HANDLED;
 2114 		goto finished;
 2115 	}
 2116 
 2117 	if (!list_empty(&ep->queue)) {
 2118 
 2119 		/* next request */
 2120 		req = list_entry(ep->queue.next,
 2121 			struct udc_request, queue);
 2122 	} else {
 2123 		req = NULL;
 2124 		udc_rxfifo_pending = 1;
 2125 	}
 2126 	VDBG(dev, "req = %p\n", req);
 2127 	/* fifo mode */
 2128 	if (!use_dma) {
 2129 
 2130 		/* read fifo */
 2131 		if (req && udc_rxfifo_read(ep, req)) {
 2132 			ret_val = IRQ_HANDLED;
 2133 
 2134 			/* finish */
 2135 			complete_req(ep, req, 0);
 2136 			/* next request */
 2137 			if (!list_empty(&ep->queue) && !ep->halted) {
 2138 				req = list_entry(ep->queue.next,
 2139 					struct udc_request, queue);
 2140 			} else
 2141 				req = NULL;
 2142 		}
 2143 
 2144 	/* DMA */
 2145 	} else if (!ep->cancel_transfer && req != NULL) {
 2146 		ret_val = IRQ_HANDLED;
 2147 
 2148 		/* check for DMA done */
 2149 		if (!use_dma_ppb) {
 2150 			dma_done = AMD_GETBITS(req->td_data->status,
 2151 						UDC_DMA_OUT_STS_BS);
 2152 		/* packet per buffer mode - rx bytes */
 2153 		} else {
 2154 			/*
 2155 			 * if BNA occurred then recover desc. from
 2156 			 * BNA dummy desc.
 2157 			 */
 2158 			if (ep->bna_occurred) {
 2159 				VDBG(dev, "Recover desc. from BNA dummy\n");
 2160 				memcpy(req->td_data, ep->bna_dummy_req->td_data,
 2161 						sizeof(struct udc_data_dma));
 2162 				ep->bna_occurred = 0;
 2163 				udc_init_bna_dummy(ep->req);
 2164 			}
 2165 			td = udc_get_last_dma_desc(req);
 2166 			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
 2167 		}
 2168 		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
 2169 			/* buffer fill mode - rx bytes */
 2170 			if (!use_dma_ppb) {
 2171 				/* number of received bytes */
 2172 				count = AMD_GETBITS(req->td_data->status,
 2173 						UDC_DMA_OUT_STS_RXBYTES);
 2174 				VDBG(dev, "rx bytes=%u\n", count);
 2175 			/* packet per buffer mode - rx bytes */
 2176 			} else {
 2177 				VDBG(dev, "req->td_data=%p\n", req->td_data);
 2178 				VDBG(dev, "last desc = %p\n", td);
 2179 				/* number of received bytes */
 2180 				if (use_dma_ppb_du) {
 2181 					/* every desc. counts bytes */
 2182 					count = udc_get_ppbdu_rxbytes(req);
 2183 				} else {
 2184 					/* last desc. counts bytes */
 2185 					count = AMD_GETBITS(td->status,
 2186 						UDC_DMA_OUT_STS_RXBYTES);
 2187 					if (!count && req->req.length
 2188 						== UDC_DMA_MAXPACKET) {
 2189 						/*
 2190 						 * on 64k packets the RXBYTES
 2191 						 * field is zero
 2192 						 */
 2193 						count = UDC_DMA_MAXPACKET;
 2194 					}
 2195 				}
 2196 				VDBG(dev, "last desc rx bytes=%u\n", count);
 2197 			}
 2198 
 2199 			tmp = req->req.length - req->req.actual;
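      			/* tmp = remaining space in the request buffer */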
 2200 			if (count > tmp) {
 2201 				if ((tmp % ep->ep.maxpacket) != 0) {
 2202 					DBG(dev, "%s: rx %db, space=%db\n",
 2203 						ep->ep.name, count, tmp);
 2204 					req->req.status = -EOVERFLOW;
 2205 				}
 2206 				count = tmp;
 2207 			}
 2208 			req->req.actual += count;
 2209 			req->dma_going = 0;
 2210 			/* complete request */
 2211 			complete_req(ep, req, 0);
 2212 
 2213 			/* next request */
 2214 			if (!list_empty(&ep->queue) && !ep->halted) {
 2215 				req = list_entry(ep->queue.next,
 2216 					struct udc_request,
 2217 					queue);
 2218 				/*
 2219 				 * DMA may already be started by udc_queue()
 2220 				 * called from the gadget driver's completion
 2221 				 * routine. This happens when the queue
 2222 				 * holds only one request.
 2223 				 */
 2224 				if (req->dma_going == 0) {
 2225 					/* next dma */
 2226 					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
 2227 						goto finished;
 2228 					/* write desc pointer */
 2229 					writel(req->td_phys,
 2230 						&ep->regs->desptr);
 2231 					req->dma_going = 1;
 2232 					/* enable DMA */
 2233 					udc_set_rde(dev);
 2234 				}
 2235 			} else {
 2236 				/*
 2237 				 * implant BNA dummy descriptor to allow
 2238 				 * RXFIFO opening by RDE
 2239 				 */
 2240 				if (ep->bna_dummy_req) {
 2241 					/* write desc pointer */
 2242 					writel(ep->bna_dummy_req->td_phys,
 2243 						&ep->regs->desptr);
 2244 					ep->bna_occurred = 0;
 2245 				}
 2246 
 2247 				/*
 2248 				 * schedule timer for setting RDE if the queue
 2249 				 * remains empty, to allow ep0 packets to pass
 2250 				 * through
 2251 				 */
 2252 				if (set_rde != 0
 2253 						&& !timer_pending(&udc_timer)) {
 2254 					udc_timer.expires =
 2255 						jiffies
 2256 						+ HZ*UDC_RDE_TIMER_SECONDS;
 2257 					set_rde = 1;
 2258 					if (!stop_timer)
 2259 						add_timer(&udc_timer);
 2260 				}
 2261 				if (ep->num != UDC_EP0OUT_IX)
 2262 					dev->data_ep_queued = 0;
 2263 			}
 2264 
 2265 		} else {
 2266 			/*
 2267 			 * RX DMA must be reenabled for each desc in PPBDU mode
 2268 			 * and must be enabled for PPBNDU mode in case of BNA
 2269 			 */
 2270 			udc_set_rde(dev);
 2271 		}
 2272 
 2273 	} else if (ep->cancel_transfer) {
 2274 		ret_val = IRQ_HANDLED;
 2275 		ep->cancel_transfer = 0;
 2276 	}
 2277 
 2278 	/* check pending CNAKS */
 2279 	if (cnak_pending) {
 2280 		/* CNAK processing only when rxfifo is empty */
 2281 		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
 2282 			udc_process_cnak_queue(dev);
 2283 	}
 2284 
 2285 	/* clear OUT bits in ep status */
 2286 	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
 2287 finished:
 2288 	return ret_val;
 2289 }
 2290 
 2291 /* Interrupt handler for data IN traffic */
 2292 static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
 2293 {
 2294 	irqreturn_t ret_val = IRQ_NONE;
 2295 	u32 tmp;
 2296 	u32 epsts;
 2297 	struct udc_ep *ep;
 2298 	struct udc_request *req;
 2299 	struct udc_data_dma *td;
 2300 	unsigned dma_done;
 2301 	unsigned len;
 2302 
 2303 	ep = &dev->ep[ep_ix];
 2304 
 2305 	epsts = readl(&ep->regs->sts);
 2306 	if (use_dma) {
 2307 		/* BNA ? */
 2308 		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
 2309 			dev_err(&dev->pdev->dev,
 2310 				"BNA ep%din occurred - DESPTR = %08lx\n",
 2311 				ep->num,
 2312 				(unsigned long) readl(&ep->regs->desptr));
 2313 
 2314 			/* clear BNA */
 2315 			writel(epsts, &ep->regs->sts);
 2316 			ret_val = IRQ_HANDLED;
 2317 			goto finished;
 2318 		}
 2319 	}
 2320 	/* HE event ? */
 2321 	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
 2322 		dev_err(&dev->pdev->dev,
 2323 			"HE ep%din occurred - DESPTR = %08lx\n",
 2324 			ep->num, (unsigned long) readl(&ep->regs->desptr));
 2325 
 2326 		/* clear HE */
 2327 		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
 2328 		ret_val = IRQ_HANDLED;
 2329 		goto finished;
 2330 	}
 2331 
 2332 	/* DMA completion (TDC = transmit DMA complete) */
 2333 	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
 2334 		VDBG(dev, "TDC set- completion\n");
 2335 		ret_val = IRQ_HANDLED;
 2336 		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
 2337 			req = list_entry(ep->queue.next,
 2338 					struct udc_request, queue);
 2339 			/*
 2340 			 * length bytes transferred
 2341 			 * check dma done of last desc. in PPBDU mode
 2342 			 */
 2343 			if (use_dma_ppb_du) {
 2344 				td = udc_get_last_dma_desc(req);
 2345 				if (td) {
 2346 					dma_done =
 2347 						AMD_GETBITS(td->status,
 2348 						UDC_DMA_IN_STS_BS);
 2349 					/* dma_done is not evaluated here */
 2350 					req->req.actual = req->req.length;
 2351 				}
 2352 			} else {
 2353 				/* assume all bytes transferred */
 2354 				req->req.actual = req->req.length;
 2355 			}
 2356 
 2357 			if (req->req.actual == req->req.length) {
 2358 				/* complete req */
 2359 				complete_req(ep, req, 0);
 2360 				req->dma_going = 0;
 2361 				/* further request available ? */
 2362 				if (list_empty(&ep->queue)) {
 2363 					/* disable interrupt */
 2364 					tmp = readl(&dev->regs->ep_irqmsk);
 2365 					tmp |= AMD_BIT(ep->num);
 2366 					writel(tmp, &dev->regs->ep_irqmsk);
 2367 				}
 2368 			}
 2369 		}
 2370 		ep->cancel_transfer = 0;
 2371 
 2372 	}
 2373 	/*
 2374 	 * status reg has IN bit set and TDC not set; if TDC was handled,
 2375 	 * IN must not be handled (UDC defect?)
 2376 	 */
 2377 	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
 2378 			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
 2379 		ret_val = IRQ_HANDLED;
 2380 		if (!list_empty(&ep->queue)) {
 2381 			/* next request */
 2382 			req = list_entry(ep->queue.next,
 2383 					struct udc_request, queue);
 2384 			/* FIFO mode */
 2385 			if (!use_dma) {
 2386 				/* write fifo */
 2387 				udc_txfifo_write(ep, &req->req);
 2388 				len = req->req.length - req->req.actual;
 2389 				if (len > ep->ep.maxpacket)
 2390 					len = ep->ep.maxpacket;
 2391 				req->req.actual += len;
 2392 				if (req->req.actual == req->req.length
 2393 					|| (len != ep->ep.maxpacket)) {
 2394 					/* complete req */
 2395 					complete_req(ep, req, 0);
 2396 				}
 2397 			/* DMA */
 2398 			} else if (req && !req->dma_going) {
 2399 				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
 2400 					req, req->td_data);
 2401 				if (req->td_data) {
 2402 
 2403 					req->dma_going = 1;
 2404 
 2405 					/*
 2406 					 * unset L bit of first desc.
 2407 					 * for chain
 2408 					 */
 2409 					if (use_dma_ppb && req->req.length >
 2410 							ep->ep.maxpacket) {
 2411 						req->td_data->status &=
 2412 							AMD_CLEAR_BIT(
 2413 							UDC_DMA_IN_STS_L);
 2414 					}
 2415 
 2416 					/* write desc pointer */
 2417 					writel(req->td_phys, &ep->regs->desptr);
 2418 
 2419 					/* set HOST READY */
 2420 					req->td_data->status =
 2421 						AMD_ADDBITS(
 2422 						req->td_data->status,
 2423 						UDC_DMA_IN_STS_BS_HOST_READY,
 2424 						UDC_DMA_IN_STS_BS);
 2425 
 2426 					/* set poll demand bit */
 2427 					tmp = readl(&ep->regs->ctl);
 2428 					tmp |= AMD_BIT(UDC_EPCTL_P);
 2429 					writel(tmp, &ep->regs->ctl);
 2430 				}
 2431 			}
 2432 
 2433 		} else if (!use_dma && ep->in) {
 2434 			/* disable interrupt */
 2435 			tmp = readl(
 2436 				&dev->regs->ep_irqmsk);
 2437 			tmp |= AMD_BIT(ep->num);
 2438 			writel(tmp,
 2439 				&dev->regs->ep_irqmsk);
 2440 		}
 2441 	}
 2442 	/* clear status bits */
 2443 	writel(epsts, &ep->regs->sts);
 2444 
 2445 finished:
 2446 	return ret_val;
 2447 
 2448 }
 2449 
 2450 /* Interrupt handler for Control OUT traffic */
 2451 static irqreturn_t udc_control_out_isr(struct udc *dev)
 2452 __releases(dev->lock)
 2453 __acquires(dev->lock)
 2454 {
 2455 	irqreturn_t ret_val = IRQ_NONE;
 2456 	u32 tmp;
 2457 	int setup_supported;
 2458 	u32 count;
 2459 	int set = 0;
 2460 	struct udc_ep	*ep;
 2461 	struct udc_ep	*ep_tmp;
 2462 
 2463 	ep = &dev->ep[UDC_EP0OUT_IX];
 2464 
 2465 	/* clear irq */
 2466 	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
 2467 
 2468 	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
 2469 	/* check BNA and clear if set */
 2470 	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
 2471 		VDBG(dev, "ep0: BNA set\n");
 2472 		writel(AMD_BIT(UDC_EPSTS_BNA),
 2473 			&dev->ep[UDC_EP0OUT_IX].regs->sts);
 2474 		ep->bna_occurred = 1;
 2475 		ret_val = IRQ_HANDLED;
 2476 		goto finished;
 2477 	}
 2478 
 2479 	/* type of data: SETUP packet or zero-byte DATA */
 2480 	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
 2481 	VDBG(dev, "data_typ = %x\n", tmp);
 2482 
 2483 	/* setup data */
 2484 	if (tmp == UDC_EPSTS_OUT_SETUP) {
 2485 		ret_val = IRQ_HANDLED;
 2486 
 2487 		ep->dev->stall_ep0in = 0;
 2488 		dev->waiting_zlp_ack_ep0in = 0;
 2489 
 2490 		/* set NAK for EP0_IN */
 2491 		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 2492 		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
 2493 		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 2494 		dev->ep[UDC_EP0IN_IX].naking = 1;
 2495 		/* get setup data */
 2496 		if (use_dma) {
 2497 
 2498 			/* clear OUT bits in ep status */
 2499 			writel(UDC_EPSTS_OUT_CLEAR,
 2500 				&dev->ep[UDC_EP0OUT_IX].regs->sts);
 2501 
 2502 			setup_data.data[0] =
 2503 				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
 2504 			setup_data.data[1] =
 2505 				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
 2506 			/* set HOST READY */
 2507 			dev->ep[UDC_EP0OUT_IX].td_stp->status =
 2508 					UDC_DMA_STP_STS_BS_HOST_READY;
 2509 		} else {
 2510 			/* read fifo */
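      			/* a SETUP packet is 8 bytes, i.e. 2 dwords */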
 2511 			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
 2512 		}
 2513 
 2514 		/* determine direction of control data */
 2515 		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
 2516 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
 2517 			/* enable RDE */
 2518 			udc_ep0_set_rde(dev);
 2519 			set = 0;
 2520 		} else {
 2521 			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
 2522 			/*
 2523 			 * implant BNA dummy descriptor to allow RXFIFO opening
 2524 			 * by RDE
 2525 			 */
 2526 			if (ep->bna_dummy_req) {
 2527 				/* write desc pointer */
 2528 				writel(ep->bna_dummy_req->td_phys,
 2529 					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
 2530 				ep->bna_occurred = 0;
 2531 			}
 2532 
 2533 			set = 1;
 2534 			dev->ep[UDC_EP0OUT_IX].naking = 1;
 2535 			/*
 2536 			 * set up a timer for enabling RDE (to not enable
 2537 			 * RXFIFO DMA for data too early)
 2538 			 */
 2539 			set_rde = 1;
 2540 			if (!timer_pending(&udc_timer)) {
 2541 				udc_timer.expires = jiffies +
 2542 							HZ/UDC_RDE_TIMER_DIV;
 2543 				if (!stop_timer)
 2544 					add_timer(&udc_timer);
 2545 			}
 2546 		}
 2547 
 2548 		/*
 2549 		 * mass storage reset must be processed here because
 2550 		 * next packet may be a CLEAR_FEATURE HALT which would not
 2551 		 * clear the stall bit when no STALL handshake was received
 2552 		 * before (autostall can cause this)
 2553 		 */
 2554 		if (setup_data.data[0] == UDC_MSCRES_DWORD0
 2555 				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
 2556 			DBG(dev, "MSC Reset\n");
 2557 			/*
 2558 			 * clear stall bits
 2559 			 * only one IN and OUT endpoints are handled
 2560 			 */
 2561 			ep_tmp = &udc->ep[UDC_EPIN_IX];
 2562 			udc_set_halt(&ep_tmp->ep, 0);
 2563 			ep_tmp = &udc->ep[UDC_EPOUT_IX];
 2564 			udc_set_halt(&ep_tmp->ep, 0);
 2565 		}
 2566 
 2567 		/* call gadget with setup data received */
 2568 		spin_unlock(&dev->lock);
 2569 		setup_supported = dev->driver->setup(&dev->gadget,
 2570 						&setup_data.request);
 2571 		spin_lock(&dev->lock);
 2572 
 2573 		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 2574 		/* ep0 in returns data (not zlp) on IN phase */
 2575 		if (setup_supported >= 0 && setup_supported <
 2576 				UDC_EP0IN_MAXPACKET) {
 2577 			/* clear NAK by writing CNAK in EP0_IN */
 2578 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 2579 			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 2580 			dev->ep[UDC_EP0IN_IX].naking = 0;
 2581 			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
 2582 
 2583 		/* if unsupported request then stall */
 2584 		} else if (setup_supported < 0) {
 2585 			tmp |= AMD_BIT(UDC_EPCTL_S);
 2586 			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 2587 		} else
 2588 			dev->waiting_zlp_ack_ep0in = 1;
 2589 
 2591 		/* clear NAK by writing CNAK in EP0_OUT */
 2592 		if (!set) {
 2593 			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
 2594 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 2595 			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
 2596 			dev->ep[UDC_EP0OUT_IX].naking = 0;
 2597 			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
 2598 		}
 2599 
 2600 		if (!use_dma) {
 2601 			/* clear OUT bits in ep status */
 2602 			writel(UDC_EPSTS_OUT_CLEAR,
 2603 				&dev->ep[UDC_EP0OUT_IX].regs->sts);
 2604 		}
 2605 
 2606 	/* zero-byte data packet */
 2607 	} else if (tmp == UDC_EPSTS_OUT_DATA) {
 2608 		/* clear OUT bits in ep status */
 2609 		writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
 2610 
 2611 		/* get setup data: only a zero-byte packet */
 2612 		if (use_dma) {
 2613 			/* no req if 0 packet, just reactivate */
 2614 			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
 2615 				VDBG(dev, "ZLP\n");
 2616 
 2617 				/* set HOST READY */
 2618 				dev->ep[UDC_EP0OUT_IX].td->status =
 2619 					AMD_ADDBITS(
 2620 					dev->ep[UDC_EP0OUT_IX].td->status,
 2621 					UDC_DMA_OUT_STS_BS_HOST_READY,
 2622 					UDC_DMA_OUT_STS_BS);
 2623 				/* enable RDE */
 2624 				udc_ep0_set_rde(dev);
 2625 				ret_val = IRQ_HANDLED;
 2626 
 2627 			} else {
 2628 				/* control write */
 2629 				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
 2630 				/* re-program desc. pointer for possible ZLPs */
 2631 				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
 2632 					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
 2633 				/* enable RDE */
 2634 				udc_ep0_set_rde(dev);
 2635 			}
 2636 		} else {
 2637 
 2638 			/* received number bytes */
 2639 			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
 2640 			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
 2641 			/* OUT data in fifo mode does not work, treat as 0 packet */
 2642 			count = 0;
 2643 
 2644 			/* 0 packet or real data ? */
 2645 			if (count != 0) {
 2646 				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
 2647 			} else {
 2648 				/* dummy read confirm */
 2649 				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
 2650 				ret_val = IRQ_HANDLED;
 2651 			}
 2652 		}
 2653 	}
 2654 
 2655 	/* check pending CNAKS */
 2656 	if (cnak_pending) {
 2657 		/* CNAK processing only when rxfifo is empty */
 2658 		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
 2659 			udc_process_cnak_queue(dev);
 2660 	}
 2661 
 2662 finished:
 2663 	return ret_val;
 2664 }
 2665 
 2666 /* Interrupt handler for Control IN traffic */
 2667 static irqreturn_t udc_control_in_isr(struct udc *dev)
 2668 {
 2669 	irqreturn_t ret_val = IRQ_NONE;
 2670 	u32 tmp;
 2671 	struct udc_ep *ep;
 2672 	struct udc_request *req;
 2673 	unsigned len;
 2674 
 2675 	ep = &dev->ep[UDC_EP0IN_IX];
 2676 
 2677 	/* clear irq */
 2678 	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
 2679 
 2680 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
 2681 	/* DMA completion */
 2682 	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
 2683 		VDBG(dev, "isr: TDC clear\n");
 2684 		ret_val = IRQ_HANDLED;
 2685 
 2686 		/* clear TDC bit */
 2687 		writel(AMD_BIT(UDC_EPSTS_TDC),
 2688 				&dev->ep[UDC_EP0IN_IX].regs->sts);
 2689 
 2690 	/* status reg has IN bit set ? */
 2691 	} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
 2692 		ret_val = IRQ_HANDLED;
 2693 
 2694 		if (ep->dma) {
 2695 			/* clear IN bit */
 2696 			writel(AMD_BIT(UDC_EPSTS_IN),
 2697 				&dev->ep[UDC_EP0IN_IX].regs->sts);
 2698 		}
 2699 		if (dev->stall_ep0in) {
 2700 			DBG(dev, "stall ep0in\n");
 2701 			/* halt ep0in */
 2702 			tmp = readl(&ep->regs->ctl);
 2703 			tmp |= AMD_BIT(UDC_EPCTL_S);
 2704 			writel(tmp, &ep->regs->ctl);
 2705 		} else {
 2706 			if (!list_empty(&ep->queue)) {
 2707 				/* next request */
 2708 				req = list_entry(ep->queue.next,
 2709 						struct udc_request, queue);
 2710 
 2711 				if (ep->dma) {
 2712 					/* write desc pointer */
 2713 					writel(req->td_phys, &ep->regs->desptr);
 2714 					/* set HOST READY */
 2715 					req->td_data->status =
 2716 						AMD_ADDBITS(
 2717 						req->td_data->status,
 2718 						UDC_DMA_STP_STS_BS_HOST_READY,
 2719 						UDC_DMA_STP_STS_BS);
 2720 
 2721 					/* set poll demand bit */
 2722 					tmp =
 2723 					readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 2724 					tmp |= AMD_BIT(UDC_EPCTL_P);
 2725 					writel(tmp,
 2726 					&dev->ep[UDC_EP0IN_IX].regs->ctl);
 2727 
 2728 					/* all bytes will be transferred */
 2729 					req->req.actual = req->req.length;
 2730 
 2731 					/* complete req */
 2732 					complete_req(ep, req, 0);
 2733 
 2734 				} else {
 2735 					/* write fifo */
 2736 					udc_txfifo_write(ep, &req->req);
 2737 
 2738 					/* length bytes transferred */
 2739 					len = req->req.length - req->req.actual;
 2740 					if (len > ep->ep.maxpacket)
 2741 						len = ep->ep.maxpacket;
 2742 
 2743 					req->req.actual += len;
 2744 					if (req->req.actual == req->req.length
 2745 						|| (len != ep->ep.maxpacket)) {
 2746 						/* complete req */
 2747 						complete_req(ep, req, 0);
 2748 					}
 2749 				}
 2750 
 2751 			}
 2752 		}
 2753 		ep->halted = 0;
 2754 		dev->stall_ep0in = 0;
 2755 		if (!ep->dma) {
 2756 			/* clear IN bit */
 2757 			writel(AMD_BIT(UDC_EPSTS_IN),
 2758 				&dev->ep[UDC_EP0IN_IX].regs->sts);
 2759 		}
 2760 	}
 2761 
 2762 	return ret_val;
 2763 }
 2764 
 2766 /* Interrupt handler for global device events */
 2767 static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
 2768 __releases(dev->lock)
 2769 __acquires(dev->lock)
 2770 {
 2771 	irqreturn_t ret_val = IRQ_NONE;
 2772 	u32 tmp;
 2773 	u32 cfg;
 2774 	struct udc_ep *ep;
 2775 	u16 i;
 2776 	u8 udc_csr_epix;
 2777 
 2778 	/* SET_CONFIG irq ? */
 2779 	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
 2780 		ret_val = IRQ_HANDLED;
 2781 
 2782 		/* read config value */
 2783 		tmp = readl(&dev->regs->sts);
 2784 		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
 2785 		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
 2786 		dev->cur_config = cfg;
 2787 		dev->set_cfg_not_acked = 1;
 2788 
 2789 		/* make usb request for gadget driver */
 2790 		memset(&setup_data, 0 , sizeof(union udc_setup_data));
 2791 		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
 2792 		setup_data.request.wValue = cpu_to_le16(dev->cur_config);
 2793 
 2794 		/* program the NE registers */
 2795 		for (i = 0; i < UDC_EP_NUM; i++) {
 2796 			ep = &dev->ep[i];
 2797 			if (ep->in) {
 2798 
 2799 				/* ep ix in UDC CSR register space */
 2800 				udc_csr_epix = ep->num;
 2801 
 2803 			/* OUT ep */
 2804 			} else {
 2805 				/* ep ix in UDC CSR register space */
 2806 				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
 2807 			}
 2808 
 2809 			tmp = readl(&dev->csr->ne[udc_csr_epix]);
 2810 			/* ep cfg */
 2811 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
 2812 						UDC_CSR_NE_CFG);
 2813 			/* write reg */
 2814 			writel(tmp, &dev->csr->ne[udc_csr_epix]);
 2815 
 2816 			/* clear stall bits */
 2817 			ep->halted = 0;
 2818 			tmp = readl(&ep->regs->ctl);
 2819 			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
 2820 			writel(tmp, &ep->regs->ctl);
 2821 		}
 2822 		/* call gadget zero with setup data received */
 2823 		spin_unlock(&dev->lock);
 2824 		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
 2825 		spin_lock(&dev->lock);
 2826 
 2827 	} /* SET_INTERFACE ? */
 2828 	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
 2829 		ret_val = IRQ_HANDLED;
 2830 
 2831 		dev->set_cfg_not_acked = 1;
 2832 		/* read interface and alt setting values */
 2833 		tmp = readl(&dev->regs->sts);
 2834 		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
 2835 		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
 2836 
 2837 		/* make usb request for gadget driver */
 2838 		memset(&setup_data, 0 , sizeof(union udc_setup_data));
 2839 		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
 2840 		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
 2841 		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
 2842 		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
 2843 
 2844 		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
 2845 				dev->cur_alt, dev->cur_intf);
 2846 
 2847 		/* program the NE registers */
 2848 		for (i = 0; i < UDC_EP_NUM; i++) {
 2849 			ep = &dev->ep[i];
 2850 			if (ep->in) {
 2851 
 2852 				/* ep ix in UDC CSR register space */
 2853 				udc_csr_epix = ep->num;
 2854 
 2856 			/* OUT ep */
 2857 			} else {
 2858 				/* ep ix in UDC CSR register space */
 2859 				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
 2860 			}
 2861 
 2862 			/* set ep values in the UDC CSR reg */
 2864 			tmp = readl(&dev->csr->ne[udc_csr_epix]);
 2865 			/* ep interface */
 2866 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
 2867 						UDC_CSR_NE_INTF);
 2868 			/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
 2869 			/* ep alt */
 2870 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
 2871 						UDC_CSR_NE_ALT);
 2872 			/* write reg */
 2873 			writel(tmp, &dev->csr->ne[udc_csr_epix]);
 2874 
 2875 			/* clear stall bits */
 2876 			ep->halted = 0;
 2877 			tmp = readl(&ep->regs->ctl);
 2878 			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
 2879 			writel(tmp, &ep->regs->ctl);
 2880 		}
 2881 
 2882 		/* call gadget zero with setup data received */
 2883 		spin_unlock(&dev->lock);
 2884 		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
 2885 		spin_lock(&dev->lock);
 2886 
 2887 	} /* USB reset */
 2888 	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
 2889 		DBG(dev, "USB Reset interrupt\n");
 2890 		ret_val = IRQ_HANDLED;
 2891 
 2892 		/* allow soft reset when suspend occurs */
 2893 		soft_reset_occured = 0;
 2894 
 2895 		dev->waiting_zlp_ack_ep0in = 0;
 2896 		dev->set_cfg_not_acked = 0;
 2897 
 2898 		/* mask interrupts that are not needed */
 2899 		udc_mask_unused_interrupts(dev);
 2900 
 2901 		/* call gadget to resume and reset configs etc. */
 2902 		spin_unlock(&dev->lock);
 2903 		if (dev->sys_suspended && dev->driver->resume) {
 2904 			dev->driver->resume(&dev->gadget);
 2905 			dev->sys_suspended = 0;
 2906 		}
 2907 		usb_gadget_udc_reset(&dev->gadget, dev->driver);
 2908 		spin_lock(&dev->lock);
 2909 
 2910 		/* disable ep0 to empty req queue */
 2911 		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
 2912 		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
 2913 
 2914 		/* soft reset when rxfifo not empty */
 2915 		tmp = readl(&dev->regs->sts);
 2916 		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
 2917 				&& !soft_reset_after_usbreset_occured) {
 2918 			udc_soft_reset(dev);
 2919 			soft_reset_after_usbreset_occured++;
 2920 		}
 2921 
 2922 		/*
 2923 		 * DMA reset to kill potential old DMA hw hang,
 2924 		 * POLL bit is already reset by ep_init() through
 2925 		 * disconnect()
 2926 		 */
 2927 		DBG(dev, "DMA machine reset\n");
 2928 		tmp = readl(&dev->regs->cfg);
 2929 		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
 2930 		writel(tmp, &dev->regs->cfg);
 2931 
 2932 		/* put into initial config */
 2933 		udc_basic_init(dev);
 2934 
 2935 		/* enable device setup interrupts */
 2936 		udc_enable_dev_setup_interrupts(dev);
 2937 
 2938 		/* enable suspend interrupt */
 2939 		tmp = readl(&dev->regs->irqmsk);
 2940 		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
 2941 		writel(tmp, &dev->regs->irqmsk);
 2942 
 2943 	} /* USB suspend */
 2944 	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
 2945 		DBG(dev, "USB Suspend interrupt\n");
 2946 		ret_val = IRQ_HANDLED;
 2947 		if (dev->driver->suspend) {
 2948 			spin_unlock(&dev->lock);
 2949 			dev->sys_suspended = 1;
 2950 			dev->driver->suspend(&dev->gadget);
 2951 			spin_lock(&dev->lock);
 2952 		}
 2953 	} /* new speed ? */
 2954 	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
 2955 		DBG(dev, "ENUM interrupt\n");
 2956 		ret_val = IRQ_HANDLED;
 2957 		soft_reset_after_usbreset_occured = 0;
 2958 
 2959 		/* disable ep0 to empty req queue */
 2960 		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
 2961 		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
 2962 
 2963 		/* link up all endpoints */
 2964 		udc_setup_endpoints(dev);
 2965 		dev_info(&dev->pdev->dev, "Connect: %s\n",
 2966 			 usb_speed_string(dev->gadget.speed));
 2967 
 2968 		/* init ep 0 */
 2969 		activate_control_endpoints(dev);
 2970 
 2971 		/* enable ep0 interrupts */
 2972 		udc_enable_ep0_interrupts(dev);
 2973 	}
 2974 	/* session valid change interrupt */
 2975 	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
 2976 		DBG(dev, "USB SVC interrupt\n");
 2977 		ret_val = IRQ_HANDLED;
 2978 
 2979 		/* check that session is not valid to detect disconnect */
 2980 		tmp = readl(&dev->regs->sts);
 2981 		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
 2982 			/* disable suspend interrupt */
 2983 			tmp = readl(&dev->regs->irqmsk);
 2984 			tmp |= AMD_BIT(UDC_DEVINT_US);
 2985 			writel(tmp, &dev->regs->irqmsk);
 2986 			DBG(dev, "USB Disconnect (session valid low)\n");
 2987 			/* cleanup on disconnect */
 2988 			usb_disconnect(udc);
 2989 		}
 2990 
 2991 	}
 2992 
 2993 	return ret_val;
 2994 }
 2995 
 2996 /* Interrupt Service Routine, see Linux Kernel Doc for parameters */
 2997 static irqreturn_t udc_irq(int irq, void *pdev)
 2998 {
 2999 	struct udc *dev = pdev;
 3000 	u32 reg;
 3001 	u16 i;
 3002 	u32 ep_irq;
 3003 	irqreturn_t ret_val = IRQ_NONE;
 3004 
 3005 	spin_lock(&dev->lock);
 3006 
 3007 	/* check for ep irq */
 3008 	reg = readl(&dev->regs->ep_irqsts);
 3009 	if (reg) {
 3010 		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
 3011 			ret_val |= udc_control_out_isr(dev);
 3012 		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
 3013 			ret_val |= udc_control_in_isr(dev);
 3014 
 3015 		/*
 3016 		 * data endpoints: iterate over the remaining ep irqs
 3017 		 */
 3019 		for (i = 1; i < UDC_EP_NUM; i++) {
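      			/* one status bit per endpoint */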
 3020 			ep_irq = 1 << i;
 3021 			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
 3022 				continue;
 3023 
 3024 			/* clear irq status */
 3025 			writel(ep_irq, &dev->regs->ep_irqsts);
 3026 
 3027 			/* irq for out ep ? */
 3028 			if (i > UDC_EPIN_NUM)
 3029 				ret_val |= udc_data_out_isr(dev, i);
 3030 			else
 3031 				ret_val |= udc_data_in_isr(dev, i);
 3032 		}
 3033 
 3034 	}
 3035 
 3037 	/* check for dev irq */
 3038 	reg = readl(&dev->regs->irqsts);
 3039 	if (reg) {
 3040 		/* clear irq */
 3041 		writel(reg, &dev->regs->irqsts);
 3042 		ret_val |= udc_dev_isr(dev, reg);
 3043 	}
 3044 
 3046 	spin_unlock(&dev->lock);
 3047 	return ret_val;
 3048 }
 3049 
 3050 /* Tears down device */
 3051 static void gadget_release(struct device *pdev)
 3052 {
 3053 	struct amd5536udc *dev = dev_get_drvdata(pdev);
 3054 	kfree(dev);
 3055 }
 3056 
 3057 /* Cleanup on device remove */
 3058 static void udc_remove(struct udc *dev)
 3059 {
 3060 	/* remove timer */
 3061 	stop_timer++;
 3062 	if (timer_pending(&udc_timer))
 3063 		wait_for_completion(&on_exit);
 3064 	if (udc_timer.data)
 3065 		del_timer_sync(&udc_timer);
 3066 	/* remove pollstall timer */
 3067 	stop_pollstall_timer++;
 3068 	if (timer_pending(&udc_pollstall_timer))
 3069 		wait_for_completion(&on_pollstall_exit);
 3070 	if (udc_pollstall_timer.data)
 3071 		del_timer_sync(&udc_pollstall_timer);
 3072 	udc = NULL;
 3073 }
 3074 
 3075 /* Reset all pci context */
 3076 static void udc_pci_remove(struct pci_dev *pdev)
 3077 {
 3078 	struct udc		*dev;
 3079 
 3080 	dev = pci_get_drvdata(pdev);
 3081 
 3082 	usb_del_gadget_udc(&udc->gadget);
 3083 	/* gadget driver must not be registered */
 3084 	BUG_ON(dev->driver != NULL);
 3085 
 3086 	/* dma pool cleanup */
 3087 	if (dev->data_requests)
 3088 		pci_pool_destroy(dev->data_requests);
 3089 
 3090 	if (dev->stp_requests) {
 3091 		/* cleanup DMA desc's for ep0out */
 3092 		pci_pool_free(dev->stp_requests,
 3093 			dev->ep[UDC_EP0OUT_IX].td_stp,
 3094 			dev->ep[UDC_EP0OUT_IX].td_stp_dma);
 3095 		pci_pool_free(dev->stp_requests,
 3096 			dev->ep[UDC_EP0OUT_IX].td,
 3097 			dev->ep[UDC_EP0OUT_IX].td_phys);
 3098 
 3099 		pci_pool_destroy(dev->stp_requests);
 3100 	}
 3101 
 3102 	/* reset controller */
 3103 	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
 3104 	if (dev->irq_registered)
 3105 		free_irq(pdev->irq, dev);
 3106 	if (dev->regs)
 3107 		iounmap(dev->regs);
 3108 	if (dev->mem_region)
 3109 		release_mem_region(pci_resource_start(pdev, 0),
 3110 				pci_resource_len(pdev, 0));
 3111 	if (dev->active)
 3112 		pci_disable_device(pdev);
 3113 
 3114 	udc_remove(dev);
 3115 }
 3116 
 3117 /* create dma pools on init */
 3118 static int init_dma_pools(struct udc *dev)
 3119 {
 3120 	struct udc_stp_dma	*td_stp;
 3121 	struct udc_data_dma	*td_data;
 3122 	int retval;
 3123 
 3124 	/* consistent DMA mode setting ? */
 3125 	if (use_dma_ppb) {
 3126 		use_dma_bufferfill_mode = 0;
 3127 	} else {
 3128 		use_dma_ppb_du = 0;
 3129 		use_dma_bufferfill_mode = 1;
 3130 	}
 3131 
 3132 	/* DMA setup */
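      	/* NULL device: the pools are not bound to a specific struct device */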
 3133 	dev->data_requests = dma_pool_create("data_requests", NULL,
 3134 		sizeof(struct udc_data_dma), 0, 0);
 3135 	if (!dev->data_requests) {
 3136 		DBG(dev, "can't get request data pool\n");
 3137 		retval = -ENOMEM;
 3138 		goto finished;
 3139 	}
 3140 
 3141 	/* EP0 in dma regs = dev control regs */
 3142 	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
 3143 
 3144 	/* dma desc for setup data */
 3145 	dev->stp_requests = dma_pool_create("setup requests", NULL,
 3146 		sizeof(struct udc_stp_dma), 0, 0);
 3147 	if (!dev->stp_requests) {
 3148 		DBG(dev, "can't get stp request pool\n");
 3149 		retval = -ENOMEM;
 3150 		goto finished;
 3151 	}
 3152 	/* setup */
 3153 	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
 3154 				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
 3155 	if (td_stp == NULL) {
 3156 		retval = -ENOMEM;
 3157 		goto finished;
 3158 	}
 3159 	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
 3160 
 3161 	/* data: 0 packets !? */
 3162 	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
 3163 				&dev->ep[UDC_EP0OUT_IX].td_phys);
 3164 	if (td_data == NULL) {
 3165 		retval = -ENOMEM;
 3166 		goto finished;
 3167 	}
 3168 	dev->ep[UDC_EP0OUT_IX].td = td_data;
 3169 	return 0;
 3170 
 3171 finished:
 3172 	return retval;
 3173 }
 3174 
 3175 /* Called by pci bus driver to init pci context */
 3176 static int udc_pci_probe(
 3177 	struct pci_dev *pdev,
 3178 	const struct pci_device_id *id
 3179 )
 3180 {
 3181 	struct udc		*dev;
 3182 	unsigned long		resource;
 3183 	unsigned long		len;
 3184 	int			retval = 0;
 3185 
 3186 	/* one udc only */
 3187 	if (udc) {
 3188 		dev_dbg(&pdev->dev, "already probed\n");
 3189 		return -EBUSY;
 3190 	}
 3191 
 3192 	/* init */
 3193 	dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
 3194 	if (!dev) {
 3195 		retval = -ENOMEM;
 3196 		goto finished;
 3197 	}
 3198 
 3199 	/* pci setup */
 3200 	if (pci_enable_device(pdev) < 0) {
 3201 		kfree(dev);
 3202 		dev = NULL;
 3203 		retval = -ENODEV;
 3204 		goto finished;
 3205 	}
 3206 	dev->active = 1;
 3207 
 3208 	/* PCI resource allocation */
 3209 	resource = pci_resource_start(pdev, 0);
 3210 	len = pci_resource_len(pdev, 0);
 3211 
 3212 	if (!request_mem_region(resource, len, name)) {
 3213 		dev_dbg(&pdev->dev, "pci device used already\n");
 3214 		kfree(dev);
 3215 		dev = NULL;
 3216 		retval = -EBUSY;
 3217 		goto finished;
 3218 	}
 3219 	dev->mem_region = 1;
 3220 
 3221 	dev->virt_addr = ioremap_nocache(resource, len);
 3222 	if (dev->virt_addr == NULL) {
 3223 		dev_dbg(&pdev->dev, "start address cannot be mapped\n");
 3224 		kfree(dev);
 3225 		dev = NULL;
 3226 		retval = -EFAULT;
 3227 		goto finished;
 3228 	}
 3229 
 3230 	if (!pdev->irq) {
 3231 		dev_err(&pdev->dev, "irq not set\n");
 3232 		kfree(dev);
 3233 		dev = NULL;
 3234 		retval = -ENODEV;
 3235 		goto finished;
 3236 	}
 3237 
 3238 	spin_lock_init(&dev->lock);
 3239 	/* udc csr registers base */
 3240 	dev->csr = dev->virt_addr + UDC_CSR_ADDR;
 3241 	/* dev registers base */
 3242 	dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
 3243 	/* ep registers base */
 3244 	dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
 3245 	/* fifo's base */
 3246 	dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
 3247 	dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
 3248 
 3249 	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
 3250 		dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
 3251 		kfree(dev);
 3252 		dev = NULL;
 3253 		retval = -EBUSY;
 3254 		goto finished;
 3255 	}
 3256 	dev->irq_registered = 1;
 3257 
 3258 	pci_set_drvdata(pdev, dev);
 3259 
 3260 	/* chip revision for HS (high-speed) AMD5536 */
 3261 	dev->chiprev = pdev->revision;
 3262 
 3263 	pci_set_master(pdev);
 3264 	pci_try_set_mwi(pdev);
 3265 
 3266 	/* init dma pools */
 3267 	if (use_dma) {
 3268 		retval = init_dma_pools(dev);
 3269 		if (retval != 0)
 3270 			goto finished;
 3271 	}
 3272 
 3273 	dev->phys_addr = resource;
 3274 	dev->irq = pdev->irq;
 3275 	dev->pdev = pdev;
 3276 
 3277 	/* general probing */
 3278 	if (udc_probe(dev) == 0)
 3279 		return 0;
 3280 
 3281 finished:
 3282 	if (dev)
 3283 		udc_pci_remove(pdev);
 3284 	return retval;
 3285 }
 3286 
 3287 /* general probe */
 3288 static int udc_probe(struct udc *dev)
 3289 {
 3290 	char		tmp[128];
 3291 	u32		reg;
 3292 	int		retval;
 3293 
 3294 	/* mark timer as not initialized */
 3295 	udc_timer.data = 0;
 3296 	udc_pollstall_timer.data = 0;
 3297 
 3298 	/* device struct setup */
 3299 	dev->gadget.ops = &udc_ops;
 3300 
 3301 	dev_set_name(&dev->gadget.dev, "gadget");
 3302 	dev->gadget.name = name;
 3303 	dev->gadget.max_speed = USB_SPEED_HIGH;
 3304 
 3305 	/* init registers, interrupts, ... */
 3306 	startup_registers(dev);
 3307 
 3308 	dev_info(&dev->pdev->dev, "%s\n", mod_desc);
 3309 
 3310 	snprintf(tmp, sizeof tmp, "%d", dev->irq);
 3311 	dev_info(&dev->pdev->dev,
 3312 		"irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
 3313 		tmp, dev->phys_addr, dev->chiprev,
 3314 		(dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
 3315 	strcpy(tmp, UDC_DRIVER_VERSION_STRING);
 3316 	if (dev->chiprev == UDC_HSA0_REV) {
 3317 		dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
 3318 		retval = -ENODEV;
 3319 		goto finished;
 3320 	}
 3321 	dev_info(&dev->pdev->dev,
 3322 		"driver version: %s(for Geode5536 B1)\n", tmp);
 3323 	udc = dev;
 3324 
 3325 	retval = usb_add_gadget_udc_release(&udc->pdev->dev, &dev->gadget,
 3326 			gadget_release);
 3327 	if (retval)
 3328 		goto finished;
 3329 
 3330 	/* timer init */
 3331 	init_timer(&udc_timer);
 3332 	udc_timer.function = udc_timer_function;
 3333 	udc_timer.data = 1;
 3334 	/* timer pollstall init */
 3335 	init_timer(&udc_pollstall_timer);
 3336 	udc_pollstall_timer.function = udc_pollstall_timer_function;
 3337 	udc_pollstall_timer.data = 1;
 3338 
 3339 	/* set SD */
 3340 	reg = readl(&dev->regs->ctl);
 3341 	reg |= AMD_BIT(UDC_DEVCTL_SD);
 3342 	writel(reg, &dev->regs->ctl);
 3343 
 3344 	/* print dev register info */
 3345 	print_regs(dev);
 3346 
 3347 	return 0;
 3348 
 3349 finished:
 3350 	return retval;
 3351 }
 3352 
 3353 /* Initiates a remote wakeup */
 3354 static int udc_remote_wakeup(struct udc *dev)
 3355 {
 3356 	unsigned long flags;
 3357 	u32 tmp;
 3358 
 3359 	DBG(dev, "UDC initiates remote wakeup\n");
 3360 
 3361 	spin_lock_irqsave(&dev->lock, flags);
 3362 
 3363 	tmp = readl(&dev->regs->ctl);
 3364 	tmp |= AMD_BIT(UDC_DEVCTL_RES);
 3365 	writel(tmp, &dev->regs->ctl);
 3366 	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
 3367 	writel(tmp, &dev->regs->ctl);
 3368 
 3369 	spin_unlock_irqrestore(&dev->lock, flags);
 3370 	return 0;
 3371 }
 3372 
 3373 /* PCI device parameters */
 3374 static const struct pci_device_id pci_id[] = {
 3375 	{
 3376 		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
 3377 		.class =	(PCI_CLASS_SERIAL_USB << 8) | 0xfe,
 3378 		.class_mask =	0xffffffff,
 3379 	},
 3380 	{},
 3381 };
 3382 MODULE_DEVICE_TABLE(pci, pci_id);
 3383 
 3384 /* PCI functions */
 3385 static struct pci_driver udc_pci_driver = {
 3386 	.name =		(char *) name,
 3387 	.id_table =	pci_id,
 3388 	.probe =	udc_pci_probe,
 3389 	.remove =	udc_pci_remove,
 3390 };
 3391 
 3392 module_pci_driver(udc_pci_driver);
 3393 
 3394 MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
 3395 MODULE_AUTHOR("Thomas Dahlmann");
 3396 MODULE_LICENSE("GPL");
 3397 
 3398 
 3399 
 3400 
 3401 
 3402 
 3403 /* LDV_COMMENT_BEGIN_MAIN */
 3404 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
 3405 
 3406 /*###########################################################################*/
 3407 
 3408 /*############## Driver Environment Generator 0.2 output ####################*/
 3409 
 3410 /*###########################################################################*/
 3411 
 3412 
 3413 
 3414 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before it is unloaded. */
 3415 void ldv_check_final_state(void);
 3416 
 3417 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
 3418 void ldv_check_return_value(int res);
 3419 
 3420 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
 3421 void ldv_check_return_value_probe(int res);
 3422 
 3423 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
 3424 void ldv_initialize(void);
 3425 
 3426 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
 3427 void ldv_handler_precall(void);
 3428 
 3429 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
 3430 int nondet_int(void);
 3431 
 3432 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
 3433 int LDV_IN_INTERRUPT;
 3434 
 3435 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
 3436 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
 3437 
 3438 
 3439 
 3440 	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
 3441 	/*============================= VARIABLE DECLARATION PART   =============================*/
 3442 	/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3443 	/* content: static int udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)*/
 3444 	/* LDV_COMMENT_BEGIN_PREP */
 3445 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3446 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3447 	/* LDV_COMMENT_END_PREP */
 3448 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_ep_enable" */
 3449 	struct usb_ep * var_group1;
 3450 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_ep_enable" */
 3451 	const struct usb_endpoint_descriptor * var_udc_ep_enable_6_p1;
 3452 	/* LDV_COMMENT_BEGIN_PREP */
 3453 	#ifdef UDC_VERBOSE
 3454 	#endif
 3455 	/* LDV_COMMENT_END_PREP */
 3456 	/* content: static int udc_ep_disable(struct usb_ep *usbep)*/
 3457 	/* LDV_COMMENT_BEGIN_PREP */
 3458 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3459 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3460 	/* LDV_COMMENT_END_PREP */
 3461 	/* LDV_COMMENT_BEGIN_PREP */
 3462 	#ifdef UDC_VERBOSE
 3463 	#endif
 3464 	/* LDV_COMMENT_END_PREP */
 3465 	/* content: static struct usb_request * udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)*/
 3466 	/* LDV_COMMENT_BEGIN_PREP */
 3467 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3468 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3469 	/* LDV_COMMENT_END_PREP */
 3470 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_alloc_request" */
 3471 	gfp_t  var_udc_alloc_request_9_p1;
 3472 	/* LDV_COMMENT_BEGIN_PREP */
 3473 	#ifdef UDC_VERBOSE
 3474 	#endif
 3475 	/* LDV_COMMENT_END_PREP */
 3476 	/* content: static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)*/
 3477 	/* LDV_COMMENT_BEGIN_PREP */
 3478 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3479 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3480 	/* LDV_COMMENT_END_PREP */
 3481 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_free_request" */
 3482 	struct usb_request * var_group2;
 3483 	/* LDV_COMMENT_BEGIN_PREP */
 3484 	#ifdef UDC_VERBOSE
 3485 	#endif
 3486 	/* LDV_COMMENT_END_PREP */
 3487 	/* content: static int udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)*/
 3488 	/* LDV_COMMENT_BEGIN_PREP */
 3489 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3490 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3491 	#ifdef UDC_VERBOSE
 3492 	#endif
 3493 	/* LDV_COMMENT_END_PREP */
 3494 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_queue" */
 3495 	gfp_t  var_udc_queue_23_p2;
 3496 	/* content: static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)*/
 3497 	/* LDV_COMMENT_BEGIN_PREP */
 3498 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3499 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3500 	#ifdef UDC_VERBOSE
 3501 	#endif
 3502 	/* LDV_COMMENT_END_PREP */
 3503 	/* content: static int udc_set_halt(struct usb_ep *usbep, int halt)*/
 3504 	/* LDV_COMMENT_BEGIN_PREP */
 3505 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3506 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3507 	#ifdef UDC_VERBOSE
 3508 	#endif
 3509 	/* LDV_COMMENT_END_PREP */
 3510 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_set_halt" */
 3511 	int  var_udc_set_halt_26_p1;
 3512 
 3513 	/** STRUCT: struct type: usb_gadget_ops, struct name: udc_ops **/
 3514 	/* content: static int udc_wakeup(struct usb_gadget *gadget)*/
 3515 	/* LDV_COMMENT_BEGIN_PREP */
 3516 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3517 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3518 	#ifdef UDC_VERBOSE
 3519 	#endif
 3520 	/* LDV_COMMENT_END_PREP */
 3521 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_wakeup" */
 3522 	struct usb_gadget * var_group3;
 3523 	/* content: static int udc_get_frame(struct usb_gadget *gadget)*/
 3524 	/* LDV_COMMENT_BEGIN_PREP */
 3525 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3526 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3527 	#ifdef UDC_VERBOSE
 3528 	#endif
 3529 	/* LDV_COMMENT_END_PREP */
 3530 	/* content: static int amd5536_udc_start(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
 3531 	/* LDV_COMMENT_BEGIN_PREP */
 3532 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3533 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3534 	#ifdef UDC_VERBOSE
 3535 	#endif
 3536 	/* LDV_COMMENT_END_PREP */
 3537 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "amd5536_udc_start" */
 3538 	struct usb_gadget_driver * var_group4;
 3539 	/* content: static int amd5536_udc_stop(struct usb_gadget *g)*/
 3540 	/* LDV_COMMENT_BEGIN_PREP */
 3541 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3542 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3543 	#ifdef UDC_VERBOSE
 3544 	#endif
 3545 	/* LDV_COMMENT_END_PREP */
 3546 
 3547 	/** STRUCT: struct type: pci_driver, struct name: udc_pci_driver **/
 3548 	/* content: static int udc_pci_probe( struct pci_dev *pdev, const struct pci_device_id *id )*/
 3549 	/* LDV_COMMENT_BEGIN_PREP */
 3550 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3551 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3552 	#ifdef UDC_VERBOSE
 3553 	#endif
 3554 	/* LDV_COMMENT_END_PREP */
 3555 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_pci_probe" */
 3556 	struct pci_dev * var_group5;
 3557 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_pci_probe" */
 3558 	const struct pci_device_id * var_udc_pci_probe_54_p1;
 3559 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "udc_pci_probe" */
 3560 	static int res_udc_pci_probe_54;
 3561 	/* content: static void udc_pci_remove(struct pci_dev *pdev)*/
 3562 	/* LDV_COMMENT_BEGIN_PREP */
 3563 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3564 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3565 	#ifdef UDC_VERBOSE
 3566 	#endif
 3567 	/* LDV_COMMENT_END_PREP */
 3568 
 3569 	/** CALLBACK SECTION request_irq **/
 3570 	/* content: static irqreturn_t udc_irq(int irq, void *pdev)*/
 3571 	/* LDV_COMMENT_BEGIN_PREP */
 3572 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3573 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3574 	#ifdef UDC_VERBOSE
 3575 	#endif
 3576 	/* LDV_COMMENT_END_PREP */
 3577 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_irq" */
 3578 	int  var_udc_irq_49_p0;
 3579 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_irq" */
 3580 	void * var_udc_irq_49_p1;
 3581 
 3582 	/** TIMER SECTION timer **/
 3583 	/* content: static void udc_timer_function(unsigned long v)*/
 3584 	/* LDV_COMMENT_BEGIN_PREP */
 3585 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3586 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3587 	#ifdef UDC_VERBOSE
 3588 	#endif
 3589 	/* LDV_COMMENT_END_PREP */
 3590 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_timer_function" */
 3591 	unsigned long  var_udc_timer_function_37_p0;
 3592 	/* content: static void udc_pollstall_timer_function(unsigned long v)*/
 3593 	/* LDV_COMMENT_BEGIN_PREP */
 3594 	#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3595 	#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3596 	#ifdef UDC_VERBOSE
 3597 	#endif
 3598 	/* LDV_COMMENT_END_PREP */
 3599 	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "udc_pollstall_timer_function" */
 3600 	unsigned long  var_udc_pollstall_timer_function_39_p0;
 3601 
 3602 
 3603 
 3604 
 3605 	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
 3606 	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
 3607 	/*============================= VARIABLE INITIALIZING PART  =============================*/
 3608 	LDV_IN_INTERRUPT=1;
 3609 
 3610 
 3611 
 3612 
 3613 	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
 3614 	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
 3615 	/*============================= FUNCTION CALL SECTION       =============================*/
 3616 	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
 3617 	ldv_initialize();
 3618 	
 3619 
 3620 	
 3621 
 3622 	int ldv_s_udc_pci_driver_pci_driver = 0;
 3623 
 3624 	
 3625 
 3626 	
 3627 
 3628 
 3629 	while(  nondet_int()
 3630 		|| !(ldv_s_udc_pci_driver_pci_driver == 0)
 3631 	) {
 3632 
 3633 		switch(nondet_int()) {
 3634 
 3635 			case 0: {
 3636 
 3637 				/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3638 				
 3639 
 3640 				/* content: static int udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)*/
 3641 				/* LDV_COMMENT_BEGIN_PREP */
 3642 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3643 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3644 				/* LDV_COMMENT_END_PREP */
 3645 				/* LDV_COMMENT_FUNCTION_CALL Function from field "enable" from driver structure with callbacks "udc_ep_ops" */
 3646 				ldv_handler_precall();
 3647 				udc_ep_enable( var_group1, var_udc_ep_enable_6_p1);
 3648 				/* LDV_COMMENT_BEGIN_PREP */
 3649 				#ifdef UDC_VERBOSE
 3650 				#endif
 3651 				/* LDV_COMMENT_END_PREP */
 3652 				
 3653 
 3654 				
 3655 
 3656 			}
 3657 
 3658 			break;
 3659 			case 1: {
 3660 
 3661 				/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3662 				
 3663 
 3664 				/* content: static int udc_ep_disable(struct usb_ep *usbep)*/
 3665 				/* LDV_COMMENT_BEGIN_PREP */
 3666 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3667 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3668 				/* LDV_COMMENT_END_PREP */
 3669 				/* LDV_COMMENT_FUNCTION_CALL Function from field "disable" from driver structure with callbacks "udc_ep_ops" */
 3670 				ldv_handler_precall();
 3671 				udc_ep_disable( var_group1);
 3672 				/* LDV_COMMENT_BEGIN_PREP */
 3673 				#ifdef UDC_VERBOSE
 3674 				#endif
 3675 				/* LDV_COMMENT_END_PREP */
 3676 				
 3677 
 3678 				
 3679 
 3680 			}
 3681 
 3682 			break;
 3683 			case 2: {
 3684 
 3685 				/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3686 				
 3687 
 3688 				/* content: static struct usb_request * udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)*/
 3689 				/* LDV_COMMENT_BEGIN_PREP */
 3690 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3691 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3692 				/* LDV_COMMENT_END_PREP */
 3693 				/* LDV_COMMENT_FUNCTION_CALL Function from field "alloc_request" from driver structure with callbacks "udc_ep_ops" */
 3694 				ldv_handler_precall();
 3695 				udc_alloc_request( var_group1, var_udc_alloc_request_9_p1);
 3696 				/* LDV_COMMENT_BEGIN_PREP */
 3697 				#ifdef UDC_VERBOSE
 3698 				#endif
 3699 				/* LDV_COMMENT_END_PREP */
 3700 				
 3701 
 3702 				
 3703 
 3704 			}
 3705 
 3706 			break;
 3707 			case 3: {
 3708 
 3709 				/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3710 				
 3711 
 3712 				/* content: static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)*/
 3713 				/* LDV_COMMENT_BEGIN_PREP */
 3714 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3715 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3716 				/* LDV_COMMENT_END_PREP */
 3717 				/* LDV_COMMENT_FUNCTION_CALL Function from field "free_request" from driver structure with callbacks "udc_ep_ops" */
 3718 				ldv_handler_precall();
 3719 				udc_free_request( var_group1, var_group2);
 3720 				/* LDV_COMMENT_BEGIN_PREP */
 3721 				#ifdef UDC_VERBOSE
 3722 				#endif
 3723 				/* LDV_COMMENT_END_PREP */
 3724 				
 3725 
 3726 				
 3727 
 3728 			}
 3729 
 3730 			break;
 3731 			case 4: {
 3732 
 3733 				/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3734 				
 3735 
 3736 				/* content: static int udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)*/
 3737 				/* LDV_COMMENT_BEGIN_PREP */
 3738 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3739 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3740 				#ifdef UDC_VERBOSE
 3741 				#endif
 3742 				/* LDV_COMMENT_END_PREP */
 3743 				/* LDV_COMMENT_FUNCTION_CALL Function from field "queue" from driver structure with callbacks "udc_ep_ops" */
 3744 				ldv_handler_precall();
 3745 				udc_queue( var_group1, var_group2, var_udc_queue_23_p2);
 3746 				
 3747 
 3748 				
 3749 
 3750 			}
 3751 
 3752 			break;
 3753 			case 5: {
 3754 
 3755 				/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3756 				
 3757 
 3758 				/* content: static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)*/
 3759 				/* LDV_COMMENT_BEGIN_PREP */
 3760 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3761 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3762 				#ifdef UDC_VERBOSE
 3763 				#endif
 3764 				/* LDV_COMMENT_END_PREP */
 3765 				/* LDV_COMMENT_FUNCTION_CALL Function from field "dequeue" from driver structure with callbacks "udc_ep_ops" */
 3766 				ldv_handler_precall();
 3767 				udc_dequeue( var_group1, var_group2);
 3768 				
 3769 
 3770 				
 3771 
 3772 			}
 3773 
 3774 			break;
 3775 			case 6: {
 3776 
 3777 				/** STRUCT: struct type: usb_ep_ops, struct name: udc_ep_ops **/
 3778 				
 3779 
 3780 				/* content: static int udc_set_halt(struct usb_ep *usbep, int halt)*/
 3781 				/* LDV_COMMENT_BEGIN_PREP */
 3782 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3783 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3784 				#ifdef UDC_VERBOSE
 3785 				#endif
 3786 				/* LDV_COMMENT_END_PREP */
 3787 				/* LDV_COMMENT_FUNCTION_CALL Function from field "set_halt" from driver structure with callbacks "udc_ep_ops" */
 3788 				ldv_handler_precall();
 3789 				udc_set_halt( var_group1, var_udc_set_halt_26_p1);
 3790 				
 3791 
 3792 				
 3793 
 3794 			}
 3795 
 3796 			break;
 3797 			case 7: {
 3798 
 3799 				/** STRUCT: struct type: usb_gadget_ops, struct name: udc_ops **/
 3800 				
 3801 
 3802 				/* content: static int udc_wakeup(struct usb_gadget *gadget)*/
 3803 				/* LDV_COMMENT_BEGIN_PREP */
 3804 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3805 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3806 				#ifdef UDC_VERBOSE
 3807 				#endif
 3808 				/* LDV_COMMENT_END_PREP */
 3809 				/* LDV_COMMENT_FUNCTION_CALL Function from field "wakeup" from driver structure with callbacks "udc_ops" */
 3810 				ldv_handler_precall();
 3811 				udc_wakeup( var_group3);
 3812 				
 3813 
 3814 				
 3815 
 3816 			}
 3817 
 3818 			break;
 3819 			case 8: {
 3820 
 3821 				/** STRUCT: struct type: usb_gadget_ops, struct name: udc_ops **/
 3822 				
 3823 
 3824 				/* content: static int udc_get_frame(struct usb_gadget *gadget)*/
 3825 				/* LDV_COMMENT_BEGIN_PREP */
 3826 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3827 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3828 				#ifdef UDC_VERBOSE
 3829 				#endif
 3830 				/* LDV_COMMENT_END_PREP */
 3831 				/* LDV_COMMENT_FUNCTION_CALL Function from field "get_frame" from driver structure with callbacks "udc_ops" */
 3832 				ldv_handler_precall();
 3833 				udc_get_frame( var_group3);
 3834 				
 3835 
 3836 				
 3837 
 3838 			}
 3839 
 3840 			break;
 3841 			case 9: {
 3842 
 3843 				/** STRUCT: struct type: usb_gadget_ops, struct name: udc_ops **/
 3844 				
 3845 
 3846 				/* content: static int amd5536_udc_start(struct usb_gadget *g, struct usb_gadget_driver *driver)*/
 3847 				/* LDV_COMMENT_BEGIN_PREP */
 3848 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3849 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3850 				#ifdef UDC_VERBOSE
 3851 				#endif
 3852 				/* LDV_COMMENT_END_PREP */
 3853 				/* LDV_COMMENT_FUNCTION_CALL Function from field "udc_start" from driver structure with callbacks "udc_ops" */
 3854 				ldv_handler_precall();
 3855 				amd5536_udc_start( var_group3, var_group4);
 3856 				
 3857 
 3858 				
 3859 
 3860 			}
 3861 
 3862 			break;
 3863 			case 10: {
 3864 
 3865 				/** STRUCT: struct type: usb_gadget_ops, struct name: udc_ops **/
 3866 				
 3867 
 3868 				/* content: static int amd5536_udc_stop(struct usb_gadget *g)*/
 3869 				/* LDV_COMMENT_BEGIN_PREP */
 3870 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3871 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3872 				#ifdef UDC_VERBOSE
 3873 				#endif
 3874 				/* LDV_COMMENT_END_PREP */
 3875 				/* LDV_COMMENT_FUNCTION_CALL Function from field "udc_stop" from driver structure with callbacks "udc_ops" */
 3876 				ldv_handler_precall();
 3877 				amd5536_udc_stop( var_group3);
 3878 				
 3879 
 3880 				
 3881 
 3882 			}
 3883 
 3884 			break;
 3885 			case 11: {
 3886 
 3887 				/** STRUCT: struct type: pci_driver, struct name: udc_pci_driver **/
 3888 				if(ldv_s_udc_pci_driver_pci_driver==0) {
 3889 
 3890 				/* content: static int udc_pci_probe( struct pci_dev *pdev, const struct pci_device_id *id )*/
 3891 				/* LDV_COMMENT_BEGIN_PREP */
 3892 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3893 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3894 				#ifdef UDC_VERBOSE
 3895 				#endif
 3896 				/* LDV_COMMENT_END_PREP */
 3897 				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "udc_pci_driver". Standard test function for a correct return result. */
 3898 				res_udc_pci_probe_54 = udc_pci_probe( var_group5, var_udc_pci_probe_54_p1);
 3899 				 ldv_check_return_value(res_udc_pci_probe_54);
 3900 				 ldv_check_return_value_probe(res_udc_pci_probe_54);
 3901 				 if(res_udc_pci_probe_54) 
 3902 					goto ldv_module_exit;
 3903 				ldv_s_udc_pci_driver_pci_driver++;
 3904 
 3905 				}
 3906 
 3907 			}
 3908 
 3909 			break;
 3910 			case 12: {
 3911 
 3912 				/** STRUCT: struct type: pci_driver, struct name: udc_pci_driver **/
 3913 				if(ldv_s_udc_pci_driver_pci_driver==1) {
 3914 
 3915 				/* content: static void udc_pci_remove(struct pci_dev *pdev)*/
 3916 				/* LDV_COMMENT_BEGIN_PREP */
 3917 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3918 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3919 				#ifdef UDC_VERBOSE
 3920 				#endif
 3921 				/* LDV_COMMENT_END_PREP */
 3922 				/* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "udc_pci_driver" */
 3923 				ldv_handler_precall();
 3924 				udc_pci_remove( var_group5);
 3925 				ldv_s_udc_pci_driver_pci_driver=0;
 3926 
 3927 				}
 3928 
 3929 			}
 3930 
 3931 			break;
 3932 			case 13: {
 3933 
 3934 				/** CALLBACK SECTION request_irq **/
 3935 				LDV_IN_INTERRUPT=2;
 3936 
 3937 				/* content: static irqreturn_t udc_irq(int irq, void *pdev)*/
 3938 				/* LDV_COMMENT_BEGIN_PREP */
 3939 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3940 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3941 				#ifdef UDC_VERBOSE
 3942 				#endif
 3943 				/* LDV_COMMENT_END_PREP */
 3944 				/* LDV_COMMENT_FUNCTION_CALL */
 3945 				ldv_handler_precall();
 3946 				udc_irq( var_udc_irq_49_p0, var_udc_irq_49_p1);
 3947 				LDV_IN_INTERRUPT=1;
 3948 
 3949 				
 3950 
 3951 			}
 3952 
 3953 			break;
 3954 			case 14: {
 3955 
 3956 				/** TIMER SECTION timer **/
 3957 				
 3958 
 3959 				/* content: static void udc_timer_function(unsigned long v)*/
 3960 				/* LDV_COMMENT_BEGIN_PREP */
 3961 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3962 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3963 				#ifdef UDC_VERBOSE
 3964 				#endif
 3965 				/* LDV_COMMENT_END_PREP */
 3966 				/* LDV_COMMENT_FUNCTION_CALL */
 3967 				ldv_handler_precall();
 3968 				udc_timer_function( var_udc_timer_function_37_p0);
 3969 				
 3970 
 3971 				
 3972 
 3973 			}
 3974 
 3975 			break;
 3976 			case 15: {
 3977 
 3978 				/** TIMER SECTION timer **/
 3979 				
 3980 
 3981 				/* content: static void udc_pollstall_timer_function(unsigned long v)*/
 3982 				/* LDV_COMMENT_BEGIN_PREP */
 3983 				#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
 3984 				#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
 3985 				#ifdef UDC_VERBOSE
 3986 				#endif
 3987 				/* LDV_COMMENT_END_PREP */
 3988 				/* LDV_COMMENT_FUNCTION_CALL */
 3989 				ldv_handler_precall();
 3990 				udc_pollstall_timer_function( var_udc_pollstall_timer_function_39_p0);
 3991 				
 3992 
 3993 				
 3994 
 3995 			}
 3996 
 3997 			break;
 3998 			default: break;
 3999 
 4000 		}
 4001 
 4002 	}
 4003 
 4004 	ldv_module_exit: 
 4005 
 4006 	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
 4007 	ldv_final: ldv_check_final_state();
 4008 
 4009 	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
 4010 	return;
 4011 
 4012 }
 4013 #endif
 4014 
 4015 /* LDV_COMMENT_END_MAIN */
 4016 
 4017 #line 32 "/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.1-rc1.tar.xz--X--152_1a--X--cpachecker/linux-4.1-rc1.tar.xz/csd_deg_dscv/8673/dscv_tempdir/dscv/ri/152_1a/drivers/usb/gadget/udc/amd5536udc.o.c.prepared"

    1 #ifndef _LDV_RCV_H_
    2 #define _LDV_RCV_H_
    3 
    4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
    5    label like the standard assert(). */
    6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
    7 
    8 /* The error label wrapper. It is used because some static verifiers (like
    9    BLAST) don't accept multiple error labels in a program. */
   10 static inline void ldv_error(void)
   11 {
   12   LDV_ERROR: goto LDV_ERROR;
   13 }
   14 
   15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
   16    avoided by verifiers. */
   17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
   18 
   19 /* Infinite loop that causes verifiers to skip such paths. */
   20 static inline void ldv_stop(void) {
   21   LDV_STOP: goto LDV_STOP;
   22 }
   23 
   24 /* Special nondeterministic functions. */
   25 int ldv_undef_int(void);
   26 void *ldv_undef_ptr(void);
   27 unsigned long ldv_undef_ulong(void);
   28 long ldv_undef_long(void);
   29 /* Return nondeterministic negative integer number. */
   30 static inline int ldv_undef_int_negative(void)
   31 {
   32   int ret = ldv_undef_int();
   33 
   34   ldv_assume(ret < 0);
   35 
   36   return ret;
   37 }
   38 /* Return nondeterministic nonpositive integer number. */
   39 static inline int ldv_undef_int_nonpositive(void)
   40 {
   41   int ret = ldv_undef_int();
   42 
   43   ldv_assume(ret <= 0);
   44 
   45   return ret;
   46 }
   47 
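/*
 * Editor's sketch (not part of the original trace): how the nondeterministic
 * helpers combine with ldv_assume() and ldv_assert(). The function name is
 * hypothetical and exists only to illustrate the pattern.
 */
static inline int ldv_pick_in_range(int max)
{
  int v = ldv_undef_int();        /* any integer the verifier likes */

  ldv_assert(max > 0);            /* calling this with max <= 0 is an error */
  ldv_assume(v >= 0 && v < max);  /* prune out-of-range paths */
  return v;
}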
   48 /* Add an explicit model for the __builtin_expect GCC builtin. Without the
   49    model its return value will be treated as nondeterministic by verifiers. */
   50 static inline long __builtin_expect(long exp, long c)
   51 {
   52   return exp;
   53 }
   54 
   55 /* This function causes the program to exit abnormally. GCC implements this
   56 function by using a target-dependent mechanism (such as intentionally executing
   57 an illegal instruction) or by calling abort. The mechanism used may vary from
   58 release to release so you should not rely on any particular implementation.
   59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
   60 static inline void __builtin_trap(void)
   61 {
   62   ldv_assert(0);
   63 }
   64 
   65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
   66 #define LDV_PTR_MAX 2012
   67 
   68 #endif /* _LDV_RCV_H_ */

    1 /* interrupt.h */
    2 #ifndef _LINUX_INTERRUPT_H
    3 #define _LINUX_INTERRUPT_H
    4 
    5 #include <linux/kernel.h>
    6 #include <linux/linkage.h>
    7 #include <linux/bitops.h>
    8 #include <linux/preempt.h>
    9 #include <linux/cpumask.h>
   10 #include <linux/irqreturn.h>
   11 #include <linux/irqnr.h>
   12 #include <linux/hardirq.h>
   13 #include <linux/irqflags.h>
   14 #include <linux/hrtimer.h>
   15 #include <linux/kref.h>
   16 #include <linux/workqueue.h>
   17 
   18 #include <linux/atomic.h>
   19 #include <asm/ptrace.h>
   20 #include <asm/irq.h>
   21 
   22 /*
   23  * These correspond to the IORESOURCE_IRQ_* defines in
   24  * linux/ioport.h to select the interrupt line behaviour.  When
   25  * requesting an interrupt without specifying an IRQF_TRIGGER, the
   26  * setting should be assumed to be "as already configured", which
   27  * may be as per machine or firmware initialisation.
   28  */
   29 #define IRQF_TRIGGER_NONE	0x00000000
   30 #define IRQF_TRIGGER_RISING	0x00000001
   31 #define IRQF_TRIGGER_FALLING	0x00000002
   32 #define IRQF_TRIGGER_HIGH	0x00000004
   33 #define IRQF_TRIGGER_LOW	0x00000008
   34 #define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
   35 				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
   36 #define IRQF_TRIGGER_PROBE	0x00000010
   37 
   38 /*
   39  * These flags used only by the kernel as part of the
   40  * irq handling routines.
   41  *
   42  * IRQF_SHARED - allow sharing the irq among several devices
   43  * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
   44  * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
   45  * IRQF_PERCPU - Interrupt is per cpu
   46  * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
   47  * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
   48  *                registered first in a shared interrupt is considered for
   49  *                performance reasons)
   50  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
   51  *                Used by threaded interrupts which need to keep the
   52  *                irq line disabled until the threaded handler has been run.
   53  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
   54  *                   that this interrupt will wake the system from a suspended
   55  *                   state.  See Documentation/power/suspend-and-interrupts.txt
   56  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
   57  * IRQF_NO_THREAD - Interrupt cannot be threaded
   58  * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
   59  *                resume time.
   60  * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
   61  *                interrupt handler after suspending interrupts. For system
   62  *                wakeup devices users need to implement wakeup detection in
   63  *                their interrupt handlers.
   64  */
   65 #define IRQF_SHARED		0x00000080
   66 #define IRQF_PROBE_SHARED	0x00000100
   67 #define __IRQF_TIMER		0x00000200
   68 #define IRQF_PERCPU		0x00000400
   69 #define IRQF_NOBALANCING	0x00000800
   70 #define IRQF_IRQPOLL		0x00001000
   71 #define IRQF_ONESHOT		0x00002000
   72 #define IRQF_NO_SUSPEND		0x00004000
   73 #define IRQF_FORCE_RESUME	0x00008000
   74 #define IRQF_NO_THREAD		0x00010000
   75 #define IRQF_EARLY_RESUME	0x00020000
   76 #define IRQF_COND_SUSPEND	0x00040000
   77 
   78 #define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
   79 
   80 /*
   81  * These values can be returned by request_any_context_irq() and
   82  * describe the context the interrupt will be run in.
   83  *
   84  * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
   85  * IRQC_IS_NESTED - interrupt runs in a nested threaded context
   86  */
   87 enum {
   88 	IRQC_IS_HARDIRQ	= 0,
   89 	IRQC_IS_NESTED,
   90 };
   91 
   92 typedef irqreturn_t (*irq_handler_t)(int, void *);
   93 
   94 /**
   95  * struct irqaction - per interrupt action descriptor
   96  * @handler:	interrupt handler function
   97  * @name:	name of the device
   98  * @dev_id:	cookie to identify the device
   99  * @percpu_dev_id:	cookie to identify the device
  100  * @next:	pointer to the next irqaction for shared interrupts
  101  * @irq:	interrupt number
  102  * @flags:	flags (see IRQF_* above)
  103  * @thread_fn:	interrupt handler function for threaded interrupts
  104  * @thread:	thread pointer for threaded interrupts
  105  * @thread_flags:	flags related to @thread
  106  * @thread_mask:	bitmask for keeping track of @thread activity
  107  * @dir:	pointer to the proc/irq/NN/name entry
  108  */
  109 struct irqaction {
  110 	irq_handler_t		handler;
  111 	void			*dev_id;
  112 	void __percpu		*percpu_dev_id;
  113 	struct irqaction	*next;
  114 	irq_handler_t		thread_fn;
  115 	struct task_struct	*thread;
  116 	unsigned int		irq;
  117 	unsigned int		flags;
  118 	unsigned long		thread_flags;
  119 	unsigned long		thread_mask;
  120 	const char		*name;
  121 	struct proc_dir_entry	*dir;
  122 } ____cacheline_internodealigned_in_smp;
  123 
  124 extern irqreturn_t no_action(int cpl, void *dev_id);
  125 
  126 extern int __must_check
  127 request_threaded_irq(unsigned int irq, irq_handler_t handler,
  128 		     irq_handler_t thread_fn,
  129 		     unsigned long flags, const char *name, void *dev);
  130 
  131 static inline int __must_check
  132 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
  133 	    const char *name, void *dev)
  134 {
  135 	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
  136 }
  137 
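/*
 * Editor's sketch (an assumption, not part of the original header): a typical
 * shared-line registration using request_irq() above, mirroring the
 * request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) call in the
 * amd5536udc probe path earlier in this trace. Handler and cookie names are
 * hypothetical.
 */
static irqreturn_t my_isr(int irq, void *cookie)
{
	/* verify that our device raised the line before claiming the IRQ */
	return IRQ_HANDLED;
}

static inline int my_register_isr(unsigned int irq, void *cookie)
{
	/* IRQF_SHARED: the line may be shared with other devices */
	return request_irq(irq, my_isr, IRQF_SHARED, "my_dev", cookie);
}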
  138 extern int __must_check
  139 request_any_context_irq(unsigned int irq, irq_handler_t handler,
  140 			unsigned long flags, const char *name, void *dev_id);
  141 
  142 extern int __must_check
  143 request_percpu_irq(unsigned int irq, irq_handler_t handler,
  144 		   const char *devname, void __percpu *percpu_dev_id);
  145 
  146 extern void free_irq(unsigned int, void *);
  147 extern void free_percpu_irq(unsigned int, void __percpu *);
  148 
  149 struct device;
  150 
  151 extern int __must_check
  152 devm_request_threaded_irq(struct device *dev, unsigned int irq,
  153 			  irq_handler_t handler, irq_handler_t thread_fn,
  154 			  unsigned long irqflags, const char *devname,
  155 			  void *dev_id);
  156 
  157 static inline int __must_check
  158 devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
  159 		 unsigned long irqflags, const char *devname, void *dev_id)
  160 {
  161 	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
  162 					 devname, dev_id);
  163 }
  164 
  165 extern int __must_check
  166 devm_request_any_context_irq(struct device *dev, unsigned int irq,
  167 		 irq_handler_t handler, unsigned long irqflags,
  168 		 const char *devname, void *dev_id);
  169 
  170 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
  171 
  172 /*
  173  * On lockdep we don't want to enable hardirqs in hardirq
  174  * context. Use local_irq_enable_in_hardirq() to annotate
  175  * kernel code that has to do this nevertheless (pretty much
  176  * the only valid case is for old/broken hardware that is
  177  * insanely slow).
  178  *
  179  * NOTE: in theory this might break fragile code that relies
  180  * on hardirq delivery - in practice we don't seem to have such
  181  * places left. So the only effect should be slightly increased
  182  * irqs-off latencies.
  183  */
  184 #ifdef CONFIG_LOCKDEP
  185 # define local_irq_enable_in_hardirq()	do { } while (0)
  186 #else
  187 # define local_irq_enable_in_hardirq()	local_irq_enable()
  188 #endif
  189 
  190 extern void disable_irq_nosync(unsigned int irq);
  191 extern bool disable_hardirq(unsigned int irq);
  192 extern void disable_irq(unsigned int irq);
  193 extern void disable_percpu_irq(unsigned int irq);
  194 extern void enable_irq(unsigned int irq);
  195 extern void enable_percpu_irq(unsigned int irq, unsigned int type);
  196 extern void irq_wake_thread(unsigned int irq, void *dev_id);
  197 
  198 /* The following three functions are for the core kernel use only. */
  199 extern void suspend_device_irqs(void);
  200 extern void resume_device_irqs(void);
  201 
  202 /**
  203  * struct irq_affinity_notify - context for notification of IRQ affinity changes
  204  * @irq:		Interrupt to which notification applies
  205  * @kref:		Reference count, for internal use
  206  * @work:		Work item, for internal use
  207  * @notify:		Function to be called on change.  This will be
  208  *			called in process context.
  209  * @release:		Function to be called on release.  This will be
  210  *			called in process context.  Once registered, the
  211  *			structure must only be freed when this function is
  212  *			called or later.
  213  */
  214 struct irq_affinity_notify {
  215 	unsigned int irq;
  216 	struct kref kref;
  217 	struct work_struct work;
  218 	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
  219 	void (*release)(struct kref *ref);
  220 };
  221 
  222 #if defined(CONFIG_SMP)
  223 
  224 extern cpumask_var_t irq_default_affinity;
  225 
  226 /* Internal implementation. Use the helpers below */
  227 extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
  228 			      bool force);
  229 
  230 /**
  231  * irq_set_affinity - Set the irq affinity of a given irq
  232  * @irq:	Interrupt to set affinity
  233  * @cpumask:	cpumask
  234  *
  235  * Fails if cpumask does not contain an online CPU
  236  */
  237 static inline int
  238 irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
  239 {
  240 	return __irq_set_affinity(irq, cpumask, false);
  241 }
  242 
  243 /**
  244  * irq_force_affinity - Force the irq affinity of a given irq
  245  * @irq:	Interrupt to set affinity
  246  * @cpumask:	cpumask
  247  *
  248  * Same as irq_set_affinity, but without checking the mask against
  249  * online cpus.
  250  *
  251  * Solely for low level cpu hotplug code, where we need to make per
  252  * cpu interrupts affine before the cpu becomes online.
  253  */
  254 static inline int
  255 irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
  256 {
  257 	return __irq_set_affinity(irq, cpumask, true);
  258 }
  259 
  260 extern int irq_can_set_affinity(unsigned int irq);
  261 extern int irq_select_affinity(unsigned int irq);
  262 
  263 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
  264 
  265 extern int
  266 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
  267 
  268 #else /* CONFIG_SMP */
  269 
  270 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
  271 {
  272 	return -EINVAL;
  273 }
  274 
  275 static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
  276 {
  277 	return 0;
  278 }
  279 
  280 static inline int irq_can_set_affinity(unsigned int irq)
  281 {
  282 	return 0;
  283 }
  284 
  285 static inline int irq_select_affinity(unsigned int irq)  { return 0; }
  286 
  287 static inline int irq_set_affinity_hint(unsigned int irq,
  288 					const struct cpumask *m)
  289 {
  290 	return -EINVAL;
  291 }
  292 
  293 static inline int
  294 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
  295 {
  296 	return 0;
  297 }
  298 #endif /* CONFIG_SMP */
  299 
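/*
 * Editor's sketch (hypothetical helper, not part of the original header):
 * steer an interrupt to CPU 0 with the affinity helpers declared above.
 * Under !CONFIG_SMP this degenerates gracefully, since the stubs above
 * report that affinity cannot be set.
 */
static inline int my_pin_irq_to_cpu0(unsigned int irq)
{
	if (!irq_can_set_affinity(irq))
		return -EINVAL;	/* the line cannot be steered */
	return irq_set_affinity(irq, cpumask_of(0));
}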
  300 /*
  301  * Special lockdep variants of irq disabling/enabling.
  302  * These should be used for locking constructs where it is
  303  * known that a particular irq context is disabled and is
  304  * the only irq-context user of a lock, so it is safe to
  305  * take the lock in the irq-disabled section without
  306  * disabling hardirqs.
  307  *
  308  * On !CONFIG_LOCKDEP they are equivalent to the normal
  309  * irq disable/enable methods.
  310  */
  311 static inline void disable_irq_nosync_lockdep(unsigned int irq)
  312 {
  313 	disable_irq_nosync(irq);
  314 #ifdef CONFIG_LOCKDEP
  315 	local_irq_disable();
  316 #endif
  317 }
  318 
  319 static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
  320 {
  321 	disable_irq_nosync(irq);
  322 #ifdef CONFIG_LOCKDEP
  323 	local_irq_save(*flags);
  324 #endif
  325 }
  326 
  327 static inline void disable_irq_lockdep(unsigned int irq)
  328 {
  329 	disable_irq(irq);
  330 #ifdef CONFIG_LOCKDEP
  331 	local_irq_disable();
  332 #endif
  333 }
  334 
  335 static inline void enable_irq_lockdep(unsigned int irq)
  336 {
  337 #ifdef CONFIG_LOCKDEP
  338 	local_irq_enable();
  339 #endif
  340 	enable_irq(irq);
  341 }
  342 
  343 static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
  344 {
  345 #ifdef CONFIG_LOCKDEP
  346 	local_irq_restore(*flags);
  347 #endif
  348 	enable_irq(irq);
  349 }
  350 
  351 /* IRQ wakeup (PM) control: */
  352 extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
  353 
  354 static inline int enable_irq_wake(unsigned int irq)
  355 {
  356 	return irq_set_irq_wake(irq, 1);
  357 }
  358 
  359 static inline int disable_irq_wake(unsigned int irq)
  360 {
  361 	return irq_set_irq_wake(irq, 0);
  362 }
  363 
  364 /*
  365  * irq_get_irqchip_state/irq_set_irqchip_state specific flags
  366  */
  367 enum irqchip_irq_state {
  368 	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
  369 	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
  370 	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
  371 	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
  372 };
  373 
  374 extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
  375 				 bool *state);
  376 extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
  377 				 bool state);
  378 
  379 #ifdef CONFIG_IRQ_FORCED_THREADING
  380 extern bool force_irqthreads;
  381 #else
  382 #define force_irqthreads	(0)
  383 #endif
  384 
  385 #ifndef __ARCH_SET_SOFTIRQ_PENDING
  386 #define set_softirq_pending(x) (local_softirq_pending() = (x))
  387 #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
  388 #endif
  389 
  390 /* Some architectures might implement lazy enabling/disabling of
  391  * interrupts. In some cases, such as stop_machine, we might want
  392  * to ensure that after a local_irq_disable(), interrupts have
  393  * really been disabled in hardware. Such architectures need to
  394  * implement the following hook.
  395  */
  396 #ifndef hard_irq_disable
  397 #define hard_irq_disable()	do { } while(0)
  398 #endif
  399 
  400 /* PLEASE avoid allocating new softirqs unless you really need
  401    high-frequency threaded job scheduling. For almost all purposes
  402    tasklets are more than enough; e.g. all serial device BHs et
  403    al. should be converted to tasklets, not softirqs.
  404  */
  405 
  406 enum
  407 {
  408 	HI_SOFTIRQ=0,
  409 	TIMER_SOFTIRQ,
  410 	NET_TX_SOFTIRQ,
  411 	NET_RX_SOFTIRQ,
  412 	BLOCK_SOFTIRQ,
  413 	BLOCK_IOPOLL_SOFTIRQ,
  414 	TASKLET_SOFTIRQ,
  415 	SCHED_SOFTIRQ,
  416 	HRTIMER_SOFTIRQ,
  417 	RCU_SOFTIRQ,    /* Preferably, RCU should always be the last softirq */
  418 
  419 	NR_SOFTIRQS
  420 };
  421 
  422 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
  423 
  424 /* map softirq index to softirq name. update 'softirq_to_name' in
  425  * kernel/softirq.c when adding a new softirq.
  426  */
  427 extern const char * const softirq_to_name[NR_SOFTIRQS];
  428 
  429 /* softirq mask and active fields moved to irq_cpustat_t in
  430  * asm/hardirq.h to get better cache usage.  KAO
  431  */
  432 
  433 struct softirq_action
  434 {
  435 	void	(*action)(struct softirq_action *);
  436 };
  437 
  438 asmlinkage void do_softirq(void);
  439 asmlinkage void __do_softirq(void);
  440 
  441 #ifdef __ARCH_HAS_DO_SOFTIRQ
  442 void do_softirq_own_stack(void);
  443 #else
  444 static inline void do_softirq_own_stack(void)
  445 {
  446 	__do_softirq();
  447 }
  448 #endif
  449 
  450 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
  451 extern void softirq_init(void);
  452 extern void __raise_softirq_irqoff(unsigned int nr);
  453 
  454 extern void raise_softirq_irqoff(unsigned int nr);
  455 extern void raise_softirq(unsigned int nr);
  456 
  457 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
  458 
  459 static inline struct task_struct *this_cpu_ksoftirqd(void)
  460 {
  461 	return this_cpu_read(ksoftirqd);
  462 }
  463 
  464 /* Tasklets --- multithreaded analogue of BHs.
  465 
  466    Main feature distinguishing them from generic softirqs: a tasklet
  467    runs on only one CPU at a time.
  468 
  469    Main feature distinguishing them from BHs: different tasklets
  470    may run simultaneously on different CPUs.
  471 
  472    Properties:
  473    * If tasklet_schedule() is called, the tasklet is guaranteed
  474      to be executed on some cpu at least once afterwards.
  475    * If the tasklet is already scheduled but its execution has not yet
  476      started, it will be executed only once.
  477    * If the tasklet is already running on another CPU (or schedule is called
  478      from the tasklet itself), it is rescheduled for later.
  479    * A tasklet is strictly serialized with respect to itself, but not
  480      with respect to other tasklets. If a client needs intertask
  481      synchronization, it must use spinlocks.
  482  */
  483 
  484 struct tasklet_struct
  485 {
  486 	struct tasklet_struct *next;
  487 	unsigned long state;
  488 	atomic_t count;
  489 	void (*func)(unsigned long);
  490 	unsigned long data;
  491 };
  492 
  493 #define DECLARE_TASKLET(name, func, data) \
  494 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
  495 
  496 #define DECLARE_TASKLET_DISABLED(name, func, data) \
  497 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
  498 
  499 
  500 enum
  501 {
  502 	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
  503 	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
  504 };
  505 
  506 #ifdef CONFIG_SMP
  507 static inline int tasklet_trylock(struct tasklet_struct *t)
  508 {
  509 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
  510 }
  511 
  512 static inline void tasklet_unlock(struct tasklet_struct *t)
  513 {
  514 	smp_mb__before_atomic();
  515 	clear_bit(TASKLET_STATE_RUN, &(t)->state);
  516 }
  517 
  518 static inline void tasklet_unlock_wait(struct tasklet_struct *t)
  519 {
  520 	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
  521 }
  522 #else
  523 #define tasklet_trylock(t) 1
  524 #define tasklet_unlock_wait(t) do { } while (0)
  525 #define tasklet_unlock(t) do { } while (0)
  526 #endif
  527 
  528 extern void __tasklet_schedule(struct tasklet_struct *t);
  529 
  530 static inline void tasklet_schedule(struct tasklet_struct *t)
  531 {
  532 	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  533 		__tasklet_schedule(t);
  534 }
  535 
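/*
 * Editor's sketch (hypothetical names, not from the original header): declare
 * a tasklet with the DECLARE_TASKLET() macro above and kick it, e.g. from an
 * interrupt handler. The function later runs in softirq context and must not
 * sleep.
 */
static void my_tasklet_fn(unsigned long data)
{
	/* deferred bottom-half work goes here */
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

static inline void my_irq_bottom_half_kick(void)
{
	tasklet_schedule(&my_tasklet);	/* guaranteed to run at least once */
}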
  536 extern void __tasklet_hi_schedule(struct tasklet_struct *t);
  537 
  538 static inline void tasklet_hi_schedule(struct tasklet_struct *t)
  539 {
  540 	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  541 		__tasklet_hi_schedule(t);
  542 }
  543 
  544 extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
  545 
  546 /*
  547  * This version avoids touching any other tasklets. Needed for kmemcheck
  548  * in order not to take any page faults while enqueueing this tasklet;
  549  * consider VERY carefully whether you really need this or
  550  * tasklet_hi_schedule()...
  551  */
  552 static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
  553 {
  554 	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  555 		__tasklet_hi_schedule_first(t);
  556 }
  557 
  558 
  559 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
  560 {
  561 	atomic_inc(&t->count);
  562 	smp_mb__after_atomic();
  563 }
  564 
  565 static inline void tasklet_disable(struct tasklet_struct *t)
  566 {
  567 	tasklet_disable_nosync(t);
  568 	tasklet_unlock_wait(t);
  569 	smp_mb();
  570 }
  571 
  572 static inline void tasklet_enable(struct tasklet_struct *t)
  573 {
  574 	smp_mb__before_atomic();
  575 	atomic_dec(&t->count);
  576 }
  577 
  578 extern void tasklet_kill(struct tasklet_struct *t);
  579 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
  580 extern void tasklet_init(struct tasklet_struct *t,
  581 			 void (*func)(unsigned long), unsigned long data);
  582 
  583 struct tasklet_hrtimer {
  584 	struct hrtimer		timer;
  585 	struct tasklet_struct	tasklet;
  586 	enum hrtimer_restart	(*function)(struct hrtimer *);
  587 };
  588 
  589 extern void
  590 tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
  591 		     enum hrtimer_restart (*function)(struct hrtimer *),
  592 		     clockid_t which_clock, enum hrtimer_mode mode);
  593 
  594 static inline
  595 int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
  596 			  const enum hrtimer_mode mode)
  597 {
  598 	return hrtimer_start(&ttimer->timer, time, mode);
  599 }
  600 
  601 static inline
  602 void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
  603 {
  604 	hrtimer_cancel(&ttimer->timer);
  605 	tasklet_kill(&ttimer->tasklet);
  606 }
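/*
 * Editor's sketch (hypothetical names): arm a tasklet_hrtimer so that a
 * callback fires in tasklet context 10 ms from now, using the helpers above.
 */
static enum hrtimer_restart my_tt_cb(struct hrtimer *t)
{
	/* invoked via the tasklet wrapper, i.e. in softirq context */
	return HRTIMER_NORESTART;
}

static struct tasklet_hrtimer my_tt;

static inline void my_tt_arm(void)
{
	tasklet_hrtimer_init(&my_tt, my_tt_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&my_tt, ms_to_ktime(10), HRTIMER_MODE_REL);
}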
  607 
  608 /*
  609  * Autoprobing for irqs:
  610  *
  611  * probe_irq_on() and probe_irq_off() provide robust primitives
  612  * for accurate IRQ probing during kernel initialization.  They are
  613  * reasonably simple to use, are not "fooled" by spurious interrupts,
  614  * and, unlike other attempts at IRQ probing, they do not get hung on
  615  * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
  616  *
  617  * For reasonably foolproof probing, use them as follows:
  618  *
  619  * 1. clear and/or mask the device's internal interrupt.
  620  * 2. sti();
  621  * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
  622  * 4. enable the device and cause it to trigger an interrupt.
  623  * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
  624  * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
  625  * 7. service the device to clear its pending interrupt.
  626  * 8. loop again if paranoia is required.
  627  *
  628  * probe_irq_on() returns a mask of allocated irq's.
  629  *
  630  * probe_irq_off() takes the mask as a parameter,
  631  * and returns the irq number which occurred,
  632  * or zero if none occurred, or a negative irq number
  633  * if more than one irq occurred.
  634  */
  635 
  636 #if !defined(CONFIG_GENERIC_IRQ_PROBE) 
  637 static inline unsigned long probe_irq_on(void)
  638 {
  639 	return 0;
  640 }
  641 static inline int probe_irq_off(unsigned long val)
  642 {
  643 	return 0;
  644 }
  645 static inline unsigned int probe_irq_mask(unsigned long val)
  646 {
  647 	return 0;
  648 }
  649 #else
  650 extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
  651 extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
  652 extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
  653 #endif
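/*
 * Editor's sketch of the eight-step autoprobe recipe documented above. The
 * my_device_*() hooks are hypothetical, and <linux/delay.h> is assumed for
 * mdelay(); everything else uses the declarations above.
 */
extern void my_device_trigger_irq(void);	/* hypothetical device hook */
extern void my_device_ack_irq(void);		/* hypothetical device hook */

static int my_probe_for_irq(void)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* take over idle, unassigned IRQs */
	my_device_trigger_irq();	/* make the device raise its line */
	mdelay(10);			/* wait for the interrupt to arrive */
	irq = probe_irq_off(mask);	/* 0 = none, negative = multiple */
	my_device_ack_irq();		/* clear the pending interrupt */
	return irq;
}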
  654 
  655 #ifdef CONFIG_PROC_FS
  656 /* Initialize /proc/irq/ */
  657 extern void init_irq_proc(void);
  658 #else
  659 static inline void init_irq_proc(void)
  660 {
  661 }
  662 #endif
  663 
  664 struct seq_file;
  665 int show_interrupts(struct seq_file *p, void *v);
  666 int arch_show_interrupts(struct seq_file *p, int prec);
  667 
  668 extern int early_irq_init(void);
  669 extern int arch_probe_nr_irqs(void);
  670 extern int arch_early_irq_init(void);
  671 
  672 #endif
    1 /*
    2  * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
    3  *
    4  * (C) SGI 2006, Christoph Lameter
    5  * 	Cleaned up and restructured to ease the addition of alternative
    6  * 	implementations of SLAB allocators.
    7  * (C) Linux Foundation 2008-2013
    8  *      Unified interface for all slab allocators
    9  */
   10 
   11 #ifndef _LINUX_SLAB_H
   12 #define	_LINUX_SLAB_H
   13 
   14 #include <linux/gfp.h>
   15 #include <linux/types.h>
   16 #include <linux/workqueue.h>
   17 
   18 
   19 /*
   20  * Flags to pass to kmem_cache_create().
   21  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
   22  */
   23 #define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
   24 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
   25 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
   26 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
   27 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
   28 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
   29 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
   30 /*
   31  * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
   32  *
   33  * This delays freeing the SLAB page by a grace period, it does _NOT_
   34  * delay object freeing. This means that if you do kmem_cache_free()
   35  * that memory location is free to be reused at any time. Thus it may
   36  * be possible to see another object there in the same RCU grace period.
   37  *
   38  * This feature only ensures the memory location backing the object
   39  * stays valid, the trick to using this is relying on an independent
   40  * object validation pass. Something like:
   41  *
   42  *  rcu_read_lock()
   43  * again:
   44  *  obj = lockless_lookup(key);
   45  *  if (obj) {
   46  *    if (!try_get_ref(obj)) // might fail for free objects
   47  *      goto again;
   48  *
   49  *    if (obj->key != key) { // not the object we expected
   50  *      put_ref(obj);
   51  *      goto again;
   52  *    }
   53  *  }
   54  *  rcu_read_unlock();
   55  *
   56  * This is useful if we need to approach a kernel structure obliquely,
   57  * from its address obtained without the usual locking. We can lock
   58  * the structure to stabilize it and check it's still at the given address,
   59  * only if we can be sure that the memory has not been meanwhile reused
   60  * for some other kind of object (which our subsystem's lock might corrupt).
   61  *
   62  * rcu_read_lock before reading the address, then rcu_read_unlock after
   63  * taking the spinlock within the structure expected at that address.
   64  */
   65 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
   66 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
   67 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
   68 
   69 /* Flag to prevent checks on free */
   70 #ifdef CONFIG_DEBUG_OBJECTS
   71 # define SLAB_DEBUG_OBJECTS	0x00400000UL
   72 #else
   73 # define SLAB_DEBUG_OBJECTS	0x00000000UL
   74 #endif
   75 
   76 #define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
   77 
   78 /* Don't track use of uninitialized memory */
   79 #ifdef CONFIG_KMEMCHECK
   80 # define SLAB_NOTRACK		0x01000000UL
   81 #else
   82 # define SLAB_NOTRACK		0x00000000UL
   83 #endif
   84 #ifdef CONFIG_FAILSLAB
   85 # define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
   86 #else
   87 # define SLAB_FAILSLAB		0x00000000UL
   88 #endif
   89 
   90 /* The following flags affect the page allocator grouping pages by mobility */
   91 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
   92 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
   93 /*
   94  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
   95  *
   96  * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
   97  *
   98  * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
   99  * Both make kfree a no-op.
  100  */
  101 #define ZERO_SIZE_PTR ((void *)16)
  102 
  103 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
  104 				(unsigned long)ZERO_SIZE_PTR)
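/*
 * Editor's sketch (assumption): what ZERO_SIZE_PTR means for callers.
 * kmalloc(0, ...) returns ZERO_SIZE_PTR rather than NULL, so a plain
 * NULL check does not catch it; use ZERO_OR_NULL_PTR() when a
 * zero-sized request must count as "nothing allocated".
 *
 *   void *p = kmalloc(0, GFP_KERNEL);
 *
 *   if (ZERO_OR_NULL_PTR(p))
 *           pr_debug("no usable allocation\n");  // true for p here
 *   kfree(p);                                    // no-op, still safe
 */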
  105 
  106 #include <linux/kmemleak.h>
  107 #include <linux/kasan.h>
  108 
  109 struct mem_cgroup;
  110 /*
  111  * struct kmem_cache related prototypes
  112  */
  113 void __init kmem_cache_init(void);
  114 int slab_is_available(void);
  115 
  116 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
  117 			unsigned long,
  118 			void (*)(void *));
  119 void kmem_cache_destroy(struct kmem_cache *);
  120 int kmem_cache_shrink(struct kmem_cache *);
  121 
  122 void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
  123 void memcg_deactivate_kmem_caches(struct mem_cgroup *);
  124 void memcg_destroy_kmem_caches(struct mem_cgroup *);
  125 
  126 /*
  127  * Please use this macro to create slab caches. Simply specify the
  128  * name of the structure and maybe some flags that are listed above.
  129  *
  130  * The alignment of the struct determines object alignment. If you
  131  * f.e. add ____cacheline_aligned_in_smp to the struct declaration
  132  * then the objects will be properly aligned in SMP configurations.
  133  */
  134 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
  135 		sizeof(struct __struct), __alignof__(struct __struct),\
  136 		(__flags), NULL)
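/*
 * Editor's sketch (hypothetical struct and cache name): creating a
 * slab cache with the KMEM_CACHE() convenience macro. The alignment
 * attribute on the struct is honoured, as the comment above explains.
 *
 *   struct my_obj {
 *           int id;
 *           struct list_head link;
 *   } ____cacheline_aligned_in_smp;
 *
 *   static struct kmem_cache *my_cache;
 *
 *   my_cache = KMEM_CACHE(my_obj, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *   // objects then come from kmem_cache_alloc(my_cache, GFP_KERNEL)
 */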
  137 
  138 /*
  139  * Common kmalloc functions provided by all allocators
  140  */
  141 void * __must_check __krealloc(const void *, size_t, gfp_t);
  142 void * __must_check krealloc(const void *, size_t, gfp_t);
  143 void kfree(const void *);
  144 void kzfree(const void *);
  145 size_t ksize(const void *);
  146 
  147 /*
  148  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  149  * alignment larger than the alignment of a 64-bit integer.
  150  * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
  151  */
  152 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
  153 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
  154 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
  155 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
  156 #else
  157 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
  158 #endif
  159 
  160 /*
  161  * Kmalloc array related definitions
  162  */
  163 
  164 #ifdef CONFIG_SLAB
  165 /*
  166  * The largest kmalloc size supported by the SLAB allocators is
  167  * 32 megabyte (2^25) or the maximum allocatable page order if that is
  168  * less than 32 MB.
  169  *
  170  * WARNING: It's not easy to increase this value since the allocators have
  171  * to do various tricks to work around compiler limitations in order to
  172  * ensure proper constant folding.
  173  */
  174 #define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
  175 				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
  176 #define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
  177 #ifndef KMALLOC_SHIFT_LOW
  178 #define KMALLOC_SHIFT_LOW	5
  179 #endif
  180 #endif
  181 
  182 #ifdef CONFIG_SLUB
  183 /*
  184  * SLUB directly allocates requests fitting into an order-1 page
  185  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
  186  */
  187 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
  188 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
  189 #ifndef KMALLOC_SHIFT_LOW
  190 #define KMALLOC_SHIFT_LOW	3
  191 #endif
  192 #endif
  193 
  194 #ifdef CONFIG_SLOB
  195 /*
  196  * SLOB passes all requests larger than one page to the page allocator.
  197  * No kmalloc array is necessary since objects of different sizes can
  198  * be allocated from the same page.
  199  */
  200 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
  201 #define KMALLOC_SHIFT_MAX	30
  202 #ifndef KMALLOC_SHIFT_LOW
  203 #define KMALLOC_SHIFT_LOW	3
  204 #endif
  205 #endif
  206 
  207 /* Maximum allocatable size */
  208 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
  209 /* Maximum size for which we actually use a slab cache */
  210 #define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
  211 /* Maximum order allocatable via the slab allocator */
  212 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
  213 
  214 /*
  215  * Kmalloc subsystem.
  216  */
  217 #ifndef KMALLOC_MIN_SIZE
  218 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
  219 #endif
  220 
  221 /*
  222  * This restriction comes from byte sized index implementation.
  223  * Page size is normally 2^12 bytes and, in this case, if we want to use
  224  * byte sized index which can represent 2^8 entries, the size of the object
  225  * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
  226  * If the minimum kmalloc size is less than 16, we use it as the minimum
  227  * object size and give up using the byte sized index.
  228  */
  229 #define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
  230                                (KMALLOC_MIN_SIZE) : 16)
  231 
  232 #ifndef CONFIG_SLOB
  233 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  234 #ifdef CONFIG_ZONE_DMA
  235 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
  236 #endif
  237 
  238 /*
  239  * Figure out which kmalloc slab an allocation of a certain size
  240  * belongs to.
  241  * 0 = zero alloc
  242  * 1 =  65 .. 96 bytes
  243  * 2 = 129 .. 192 bytes
  244  * n = 2^(n-1)+1 .. 2^n
  245  */
  246 static __always_inline int kmalloc_index(size_t size)
  247 {
  248 	if (!size)
  249 		return 0;
  250 
  251 	if (size <= KMALLOC_MIN_SIZE)
  252 		return KMALLOC_SHIFT_LOW;
  253 
  254 	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
  255 		return 1;
  256 	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
  257 		return 2;
  258 	if (size <=          8) return 3;
  259 	if (size <=         16) return 4;
  260 	if (size <=         32) return 5;
  261 	if (size <=         64) return 6;
  262 	if (size <=        128) return 7;
  263 	if (size <=        256) return 8;
  264 	if (size <=        512) return 9;
  265 	if (size <=       1024) return 10;
  266 	if (size <=   2 * 1024) return 11;
  267 	if (size <=   4 * 1024) return 12;
  268 	if (size <=   8 * 1024) return 13;
  269 	if (size <=  16 * 1024) return 14;
  270 	if (size <=  32 * 1024) return 15;
  271 	if (size <=  64 * 1024) return 16;
  272 	if (size <= 128 * 1024) return 17;
  273 	if (size <= 256 * 1024) return 18;
  274 	if (size <= 512 * 1024) return 19;
  275 	if (size <= 1024 * 1024) return 20;
  276 	if (size <=  2 * 1024 * 1024) return 21;
  277 	if (size <=  4 * 1024 * 1024) return 22;
  278 	if (size <=  8 * 1024 * 1024) return 23;
  279 	if (size <=  16 * 1024 * 1024) return 24;
  280 	if (size <=  32 * 1024 * 1024) return 25;
  281 	if (size <=  64 * 1024 * 1024) return 26;
  282 	BUG();
  283 
  284 	/* Will never be reached. Needed because the compiler may complain */
  285 	return -1;
  286 }
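/*
 * Editor's note (worked examples, assuming KMALLOC_MIN_SIZE <= 32):
 *   kmalloc_index(0)   -> 0  (zero alloc, ZERO_SIZE_PTR)
 *   kmalloc_index(72)  -> 1  (the 65..96 byte cache)
 *   kmalloc_index(100) -> 7  (rounded up to the 128 byte cache)
 *   kmalloc_index(200) -> 8  (rounded up to the 256 byte cache)
 */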
  287 #endif /* !CONFIG_SLOB */
  288 
  289 void *__kmalloc(size_t size, gfp_t flags);
  290 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
  291 void kmem_cache_free(struct kmem_cache *, void *);
  292 
  293 #ifdef CONFIG_NUMA
  294 void *__kmalloc_node(size_t size, gfp_t flags, int node);
  295 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
  296 #else
  297 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
  298 {
  299 	return __kmalloc(size, flags);
  300 }
  301 
  302 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
  303 {
  304 	return kmem_cache_alloc(s, flags);
  305 }
  306 #endif
  307 
  308 #ifdef CONFIG_TRACING
  309 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
  310 
  311 #ifdef CONFIG_NUMA
  312 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
  313 					   gfp_t gfpflags,
  314 					   int node, size_t size);
  315 #else
  316 static __always_inline void *
  317 kmem_cache_alloc_node_trace(struct kmem_cache *s,
  318 			      gfp_t gfpflags,
  319 			      int node, size_t size)
  320 {
  321 	return kmem_cache_alloc_trace(s, gfpflags, size);
  322 }
  323 #endif /* CONFIG_NUMA */
  324 
  325 #else /* CONFIG_TRACING */
  326 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
  327 		gfp_t flags, size_t size)
  328 {
  329 	void *ret = kmem_cache_alloc(s, flags);
  330 
  331 	kasan_kmalloc(s, ret, size);
  332 	return ret;
  333 }
  334 
  335 static __always_inline void *
  336 kmem_cache_alloc_node_trace(struct kmem_cache *s,
  337 			      gfp_t gfpflags,
  338 			      int node, size_t size)
  339 {
  340 	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
  341 
  342 	kasan_kmalloc(s, ret, size);
  343 	return ret;
  344 }
  345 #endif /* CONFIG_TRACING */
  346 
  347 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
  348 
  349 #ifdef CONFIG_TRACING
  350 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
  351 #else
  352 static __always_inline void *
  353 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
  354 {
  355 	return kmalloc_order(size, flags, order);
  356 }
  357 #endif
  358 
  359 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  360 {
  361 	unsigned int order = get_order(size);
  362 	return kmalloc_order_trace(size, flags, order);
  363 }
  364 
  365 /**
  366  * kmalloc - allocate memory
  367  * @size: how many bytes of memory are required.
  368  * @flags: the type of memory to allocate.
  369  *
  370  * kmalloc is the normal method of allocating memory
  371  * for objects smaller than page size in the kernel.
  372  *
  373  * The @flags argument may be one of:
  374  *
  375  * %GFP_USER - Allocate memory on behalf of user.  May sleep.
  376  *
  377  * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
  378  *
  379  * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
  380  *   For example, use this inside interrupt handlers.
  381  *
  382  * %GFP_HIGHUSER - Allocate pages from high memory.
  383  *
  384  * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
  385  *
  386  * %GFP_NOFS - Do not make any fs calls while trying to get memory.
  387  *
  388  * %GFP_NOWAIT - Allocation will not sleep.
  389  *
  390  * %__GFP_THISNODE - Allocate node-local memory only.
  391  *
  392  * %GFP_DMA - Allocation suitable for DMA.
  393  *   Should only be used for kmalloc() caches. Otherwise, use a
  394  *   slab created with SLAB_DMA.
  395  *
  396  * Also it is possible to set different flags by OR'ing
  397  * in one or more of the following additional @flags:
  398  *
  399  * %__GFP_COLD - Request cache-cold pages instead of
  400  *   trying to return cache-warm pages.
  401  *
  402  * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
  403  *
  404  * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
  405  *   (think twice before using).
  406  *
  407  * %__GFP_NORETRY - If memory is not immediately available,
  408  *   then give up at once.
  409  *
  410  * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
  411  *
  412  * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
  413  *
  414  * There are other flags available as well, but these are not intended
  415  * for general use, and so are not documented here. For a full list of
  416  * potential flags, always refer to linux/gfp.h.
  417  */
  418 static __always_inline void *kmalloc(size_t size, gfp_t flags)
  419 {
  420 	if (__builtin_constant_p(size)) {
  421 		if (size > KMALLOC_MAX_CACHE_SIZE)
  422 			return kmalloc_large(size, flags);
  423 #ifndef CONFIG_SLOB
  424 		if (!(flags & GFP_DMA)) {
  425 			int index = kmalloc_index(size);
  426 
  427 			if (!index)
  428 				return ZERO_SIZE_PTR;
  429 
  430 			return kmem_cache_alloc_trace(kmalloc_caches[index],
  431 					flags, size);
  432 		}
  433 #endif
  434 	}
  435 	return __kmalloc(size, flags);
  436 }
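/*
 * Editor's sketch (hypothetical struct): the common allocate/check/free
 * pattern. With a compile-time constant size the branch above lets the
 * compiler resolve the cache index via kmalloc_index() at build time.
 *
 *   struct my_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *   if (!ctx)
 *           return -ENOMEM;
 *   ...
 *   kfree(ctx);
 */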
  437 
  438 /*
  439  * Determine the size used for the nth kmalloc cache.
  440  * Return the size, or 0 if a kmalloc cache of that
  441  * size does not exist.
  442  */
  443 static __always_inline int kmalloc_size(int n)
  444 {
  445 #ifndef CONFIG_SLOB
  446 	if (n > 2)
  447 		return 1 << n;
  448 
  449 	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
  450 		return 96;
  451 
  452 	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
  453 		return 192;
  454 #endif
  455 	return 0;
  456 }
  457 
  458 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  459 {
  460 #ifndef CONFIG_SLOB
  461 	if (__builtin_constant_p(size) &&
  462 		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
  463 		int i = kmalloc_index(size);
  464 
  465 		if (!i)
  466 			return ZERO_SIZE_PTR;
  467 
  468 		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
  469 						flags, node, size);
  470 	}
  471 #endif
  472 	return __kmalloc_node(size, flags, node);
  473 }
  474 
  475 /*
  476  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  477  * Intended for arches that get misalignment faults even for 64 bit integer
  478  * aligned buffers.
  479  */
  480 #ifndef ARCH_SLAB_MINALIGN
  481 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
  482 #endif
  483 
  484 struct memcg_cache_array {
  485 	struct rcu_head rcu;
  486 	struct kmem_cache *entries[0];
  487 };
  488 
  489 /*
  490  * This is the main placeholder for memcg-related information in kmem caches.
  491  * Both the root cache and the child caches will have it. For the root cache,
  492  * this will hold a dynamically allocated array large enough to hold
  493  * information about the currently limited memcgs in the system. To allow the
  494  * array to be accessed without taking any locks, on relocation we free the old
  495  * version only after a grace period.
  496  *
  497  * Child caches will hold extra metadata needed for their operation. Fields are:
  498  *
  499  * @memcg: pointer to the memcg this cache belongs to
  500  * @root_cache: pointer to the global, root cache, this cache was derived from
  501  *
  502  * Both root and child caches of the same kind are linked into a list chained
  503  * through @list.
  504  */
  505 struct memcg_cache_params {
  506 	bool is_root_cache;
  507 	struct list_head list;
  508 	union {
  509 		struct memcg_cache_array __rcu *memcg_caches;
  510 		struct {
  511 			struct mem_cgroup *memcg;
  512 			struct kmem_cache *root_cache;
  513 		};
  514 	};
  515 };
  516 
  517 int memcg_update_all_caches(int num_memcgs);
  518 
  519 /**
  520  * kmalloc_array - allocate memory for an array.
  521  * @n: number of elements.
  522  * @size: element size.
  523  * @flags: the type of memory to allocate (see kmalloc).
  524  */
  525 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
  526 {
  527 	if (size != 0 && n > SIZE_MAX / size)
  528 		return NULL;
  529 	return __kmalloc(n * size, flags);
  530 }
  531 
  532 /**
  533  * kcalloc - allocate memory for an array. The memory is set to zero.
  534  * @n: number of elements.
  535  * @size: element size.
  536  * @flags: the type of memory to allocate (see kmalloc).
  537  */
  538 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  539 {
  540 	return kmalloc_array(n, size, flags | __GFP_ZERO);
  541 }
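/*
 * Editor's sketch: why kmalloc_array()/kcalloc() are preferred over an
 * open-coded kmalloc(n * size, ...). The n > SIZE_MAX / size check
 * above rejects products that would wrap around, where the open-coded
 * multiplication would silently allocate a short buffer.
 *
 *   u32 *tbl  = kmalloc_array(nents, sizeof(*tbl), GFP_KERNEL);
 *   u32 *ztbl = kcalloc(nents, sizeof(*ztbl), GFP_KERNEL);  // zeroed
 *
 *   if (!tbl || !ztbl)
 *           goto err;  // hypothetical error path
 */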
  542 
  543 /*
  544  * kmalloc_track_caller is a special version of kmalloc that records the
  545  * calling function of the routine calling it for slab leak tracking instead
  546  * of just the calling function (confusing, eh?).
  547  * It's useful when the call to kmalloc comes from a widely-used standard
  548  * allocator where we care about the real place the memory allocation
  549  * request comes from.
  550  */
  551 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  552 #define kmalloc_track_caller(size, flags) \
  553 	__kmalloc_track_caller(size, flags, _RET_IP_)
  554 
  555 #ifdef CONFIG_NUMA
  556 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
  557 #define kmalloc_node_track_caller(size, flags, node) \
  558 	__kmalloc_node_track_caller(size, flags, node, \
  559 			_RET_IP_)
  560 
  561 #else /* CONFIG_NUMA */
  562 
  563 #define kmalloc_node_track_caller(size, flags, node) \
  564 	kmalloc_track_caller(size, flags)
  565 
  566 #endif /* CONFIG_NUMA */
  567 
  568 /*
  569  * Shortcuts
  570  */
  571 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
  572 {
  573 	return kmem_cache_alloc(k, flags | __GFP_ZERO);
  574 }
  575 
  576 /**
  577  * kzalloc - allocate memory. The memory is set to zero.
  578  * @size: how many bytes of memory are required.
  579  * @flags: the type of memory to allocate (see kmalloc).
  580  */
  581 static inline void *kzalloc(size_t size, gfp_t flags)
  582 {
  583 	return kmalloc(size, flags | __GFP_ZERO);
  584 }
  585 
  586 /**
  587  * kzalloc_node - allocate zeroed memory from a particular memory node.
  588  * @size: how many bytes of memory are required.
  589  * @flags: the type of memory to allocate (see kmalloc).
  590  * @node: memory node from which to allocate
  591  */
  592 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
  593 {
  594 	return kmalloc_node(size, flags | __GFP_ZERO, node);
  595 }
  596 
  597 unsigned int kmem_cache_size(struct kmem_cache *s);
  598 void __init kmem_cache_init_late(void);
  599 
  600 #endif	/* _LINUX_SLAB_H */
    1 #ifndef __LINUX_SPINLOCK_H
    2 #define __LINUX_SPINLOCK_H
    3 
    4 /*
    5  * include/linux/spinlock.h - generic spinlock/rwlock declarations
    6  *
    7  * here's the role of the various spinlock/rwlock related include files:
    8  *
    9  * on SMP builds:
   10  *
   11  *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
   12  *                        initializers
   13  *
   14  *  linux/spinlock_types.h:
   15  *                        defines the generic type and initializers
   16  *
   17  *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
   18  *                        implementations, mostly inline assembly code
   19  *
   20  *   (also included on UP-debug builds:)
   21  *
   22  *  linux/spinlock_api_smp.h:
   23  *                        contains the prototypes for the _spin_*() APIs.
   24  *
   25  *  linux/spinlock.h:     builds the final spin_*() APIs.
   26  *
   27  * on UP builds:
   28  *
   29  *  linux/spinlock_types_up.h:
   30  *                        contains the generic, simplified UP spinlock type.
   31  *                        (which is an empty structure on non-debug builds)
   32  *
   33  *  linux/spinlock_types.h:
   34  *                        defines the generic type and initializers
   35  *
   36  *  linux/spinlock_up.h:
   37  *                        contains the arch_spin_*()/etc. version of UP
   38  *                        builds. (which are NOPs on non-debug, non-preempt
   39  *                        builds)
   40  *
   41  *   (included on UP-non-debug builds:)
   42  *
   43  *  linux/spinlock_api_up.h:
   44  *                        builds the _spin_*() APIs.
   45  *
   46  *  linux/spinlock.h:     builds the final spin_*() APIs.
   47  */
   48 
   49 #include <linux/typecheck.h>
   50 #include <linux/preempt.h>
   51 #include <linux/linkage.h>
   52 #include <linux/compiler.h>
   53 #include <linux/irqflags.h>
   54 #include <linux/thread_info.h>
   55 #include <linux/kernel.h>
   56 #include <linux/stringify.h>
   57 #include <linux/bottom_half.h>
   58 #include <asm/barrier.h>
   59 
   60 
   61 /*
   62  * Must define these before including other files, inline functions need them
   63  */
   64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
   65 
   66 #define LOCK_SECTION_START(extra)               \
   67         ".subsection 1\n\t"                     \
   68         extra                                   \
   69         ".ifndef " LOCK_SECTION_NAME "\n\t"     \
   70         LOCK_SECTION_NAME ":\n\t"               \
   71         ".endif\n"
   72 
   73 #define LOCK_SECTION_END                        \
   74         ".previous\n\t"
   75 
   76 #define __lockfunc __attribute__((section(".spinlock.text")))
   77 
   78 /*
   79  * Pull the arch_spinlock_t and arch_rwlock_t definitions:
   80  */
   81 #include <linux/spinlock_types.h>
   82 
   83 /*
   84  * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
   85  */
   86 #ifdef CONFIG_SMP
   87 # include <asm/spinlock.h>
   88 #else
   89 # include <linux/spinlock_up.h>
   90 #endif
   91 
   92 #ifdef CONFIG_DEBUG_SPINLOCK
   93   extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
   94 				   struct lock_class_key *key);
   95 # define raw_spin_lock_init(lock)				\
   96 do {								\
   97 	static struct lock_class_key __key;			\
   98 								\
   99 	__raw_spin_lock_init((lock), #lock, &__key);		\
  100 } while (0)
  101 
  102 #else
  103 # define raw_spin_lock_init(lock)				\
  104 	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
  105 #endif
  106 
  107 #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
  108 
  109 #ifdef CONFIG_GENERIC_LOCKBREAK
  110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
  111 #else
  112 
  113 #ifdef arch_spin_is_contended
  114 #define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
  115 #else
  116 #define raw_spin_is_contended(lock)	(((void)(lock), 0))
  117 #endif /*arch_spin_is_contended*/
  118 #endif
  119 
  120 /*
  121  * Despite its name, it doesn't necessarily have to be a full barrier.
  122  * It should only guarantee that a STORE before the critical section
  123  * can not be reordered with a LOAD inside this section.
  124  * spin_lock() is a one-way barrier, so this LOAD can not escape out
  125  * of the region. The default implementation simply ensures that
  126  * a STORE can not move into the critical section; smp_wmb() should
  127  * serialize it with another STORE done by spin_lock().
  128  */
  129 #ifndef smp_mb__before_spinlock
  130 #define smp_mb__before_spinlock()	smp_wmb()
  131 #endif
  132 
  133 /*
  134  * Place this after a lock-acquisition primitive to guarantee that
  135  * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
  136  * if the UNLOCK and LOCK are executed by the same CPU or if the
  137  * UNLOCK and LOCK operate on the same lock variable.
  138  */
  139 #ifndef smp_mb__after_unlock_lock
  140 #define smp_mb__after_unlock_lock()	do { } while (0)
  141 #endif
  142 
  143 /**
  144  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  145  * @lock: the spinlock in question.
  146  */
  147 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
  148 
  149 #ifdef CONFIG_DEBUG_SPINLOCK
  150  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
  151 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  152  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
  153  extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
  154 #else
  155 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
  156 {
  157 	__acquire(lock);
  158 	arch_spin_lock(&lock->raw_lock);
  159 }
  160 
  161 static inline void
  162 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
  163 {
  164 	__acquire(lock);
  165 	arch_spin_lock_flags(&lock->raw_lock, *flags);
  166 }
  167 
  168 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
  169 {
  170 	return arch_spin_trylock(&(lock)->raw_lock);
  171 }
  172 
  173 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  174 {
  175 	arch_spin_unlock(&lock->raw_lock);
  176 	__release(lock);
  177 }
  178 #endif
  179 
  180 /*
  181  * Define the various spin_lock methods.  Note we define these
  182  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
  183  * various methods are defined as nops in the case they are not
  184  * required.
  185  */
  186 #define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
  187 
  188 #define raw_spin_lock(lock)	_raw_spin_lock(lock)
  189 
  190 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  191 # define raw_spin_lock_nested(lock, subclass) \
  192 	_raw_spin_lock_nested(lock, subclass)
  193 # define raw_spin_lock_bh_nested(lock, subclass) \
  194 	_raw_spin_lock_bh_nested(lock, subclass)
  195 
  196 # define raw_spin_lock_nest_lock(lock, nest_lock)			\
  197 	 do {								\
  198 		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
  199 		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
  200 	 } while (0)
  201 #else
  202 /*
  203  * Always evaluate the 'subclass' argument so that the compiler does not
  204  * warn about set-but-not-used variables when building with
  205  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
  206  */
  207 # define raw_spin_lock_nested(lock, subclass)		\
  208 	_raw_spin_lock(((void)(subclass), (lock)))
  209 # define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
  210 # define raw_spin_lock_bh_nested(lock, subclass)	_raw_spin_lock_bh(lock)
  211 #endif
  212 
  213 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  214 
  215 #define raw_spin_lock_irqsave(lock, flags)			\
  216 	do {						\
  217 		typecheck(unsigned long, flags);	\
  218 		flags = _raw_spin_lock_irqsave(lock);	\
  219 	} while (0)
  220 
  221 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  222 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  223 	do {								\
  224 		typecheck(unsigned long, flags);			\
  225 		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
  226 	} while (0)
  227 #else
  228 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
  229 	do {								\
  230 		typecheck(unsigned long, flags);			\
  231 		flags = _raw_spin_lock_irqsave(lock);			\
  232 	} while (0)
  233 #endif
  234 
  235 #else
  236 
  237 #define raw_spin_lock_irqsave(lock, flags)		\
  238 	do {						\
  239 		typecheck(unsigned long, flags);	\
  240 		_raw_spin_lock_irqsave(lock, flags);	\
  241 	} while (0)
  242 
  243 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
  244 	raw_spin_lock_irqsave(lock, flags)
  245 
  246 #endif
  247 
  248 #define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
  249 #define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
  250 #define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
  251 #define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
  252 
  253 #define raw_spin_unlock_irqrestore(lock, flags)		\
  254 	do {							\
  255 		typecheck(unsigned long, flags);		\
  256 		_raw_spin_unlock_irqrestore(lock, flags);	\
  257 	} while (0)
  258 #define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
  259 
  260 #define raw_spin_trylock_bh(lock) \
  261 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
  262 
  263 #define raw_spin_trylock_irq(lock) \
  264 ({ \
  265 	local_irq_disable(); \
  266 	raw_spin_trylock(lock) ? \
  267 	1 : ({ local_irq_enable(); 0;  }); \
  268 })
  269 
  270 #define raw_spin_trylock_irqsave(lock, flags) \
  271 ({ \
  272 	local_irq_save(flags); \
  273 	raw_spin_trylock(lock) ? \
  274 	1 : ({ local_irq_restore(flags); 0; }); \
  275 })
  276 
  277 /**
  278  * raw_spin_can_lock - would raw_spin_trylock() succeed?
  279  * @lock: the spinlock in question.
  280  */
  281 #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
  282 
  283 /* Include rwlock functions */
  284 #include <linux/rwlock.h>
  285 
  286 /*
  287  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  288  */
  289 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  290 # include <linux/spinlock_api_smp.h>
  291 #else
  292 # include <linux/spinlock_api_up.h>
  293 #endif
  294 
  295 /*
  296  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  297  */
  298 
  299 static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
  300 {
  301 	return &lock->rlock;
  302 }
  303 
  304 #define spin_lock_init(_lock)				\
  305 do {							\
  306 	spinlock_check(_lock);				\
  307 	raw_spin_lock_init(&(_lock)->rlock);		\
  308 } while (0)
  309 
  310 static inline void spin_lock(spinlock_t *lock)
  311 {
  312 	raw_spin_lock(&lock->rlock);
  313 }
  314 
  315 static inline void spin_lock_bh(spinlock_t *lock)
  316 {
  317 	raw_spin_lock_bh(&lock->rlock);
  318 }
  319 
  320 static inline int spin_trylock(spinlock_t *lock)
  321 {
  322 	return raw_spin_trylock(&lock->rlock);
  323 }
  324 
  325 #define spin_lock_nested(lock, subclass)			\
  326 do {								\
  327 	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
  328 } while (0)
  329 
  330 #define spin_lock_bh_nested(lock, subclass)			\
  331 do {								\
  332 	raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
  333 } while (0)
  334 
  335 #define spin_lock_nest_lock(lock, nest_lock)				\
  336 do {									\
  337 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
  338 } while (0)
  339 
  340 static inline void spin_lock_irq(spinlock_t *lock)
  341 {
  342 	raw_spin_lock_irq(&lock->rlock);
  343 }
  344 
  345 #define spin_lock_irqsave(lock, flags)				\
  346 do {								\
  347 	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
  348 } while (0)
  349 
  350 #define spin_lock_irqsave_nested(lock, flags, subclass)			\
  351 do {									\
  352 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
  353 } while (0)
  354 
  355 static inline void spin_unlock(spinlock_t *lock)
  356 {
  357 	raw_spin_unlock(&lock->rlock);
  358 }
  359 
  360 static inline void spin_unlock_bh(spinlock_t *lock)
  361 {
  362 	raw_spin_unlock_bh(&lock->rlock);
  363 }
  364 
  365 static inline void spin_unlock_irq(spinlock_t *lock)
  366 {
  367 	raw_spin_unlock_irq(&lock->rlock);
  368 }
  369 
  370 static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
  371 {
  372 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
  373 }
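/*
 * Editor's sketch (hypothetical lock and counter): the canonical
 * irqsave pattern built from the wrappers above. flags must be an
 * unsigned long in the caller's frame; the typecheck() inside
 * raw_spin_lock_irqsave() enforces that at build time.
 *
 *   static DEFINE_SPINLOCK(my_lock);
 *   unsigned long flags;
 *
 *   spin_lock_irqsave(&my_lock, flags);   // also disables local irqs
 *   my_counter++;                         // hypothetical shared state
 *   spin_unlock_irqrestore(&my_lock, flags);
 */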
  374 
  375 static inline int spin_trylock_bh(spinlock_t *lock)
  376 {
  377 	return raw_spin_trylock_bh(&lock->rlock);
  378 }
  379 
  380 static inline int spin_trylock_irq(spinlock_t *lock)
  381 {
  382 	return raw_spin_trylock_irq(&lock->rlock);
  383 }
  384 
  385 #define spin_trylock_irqsave(lock, flags)			\
  386 ({								\
  387 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
  388 })
  389 
  390 static inline void spin_unlock_wait(spinlock_t *lock)
  391 {
  392 	raw_spin_unlock_wait(&lock->rlock);
  393 }
  394 
  395 static inline int spin_is_locked(spinlock_t *lock)
  396 {
  397 	return raw_spin_is_locked(&lock->rlock);
  398 }
  399 
  400 static inline int spin_is_contended(spinlock_t *lock)
  401 {
  402 	return raw_spin_is_contended(&lock->rlock);
  403 }
  404 
  405 static inline int spin_can_lock(spinlock_t *lock)
  406 {
  407 	return raw_spin_can_lock(&lock->rlock);
  408 }
  409 
  410 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
  411 
  412 /*
  413  * Pull the atomic_t declaration:
  414  * (asm-mips/atomic.h needs above definitions)
  415  */
  416 #include <linux/atomic.h>
  417 /**
  418  * atomic_dec_and_lock - lock on reaching reference count zero
  419  * @atomic: the atomic counter
  420  * @lock: the spinlock in question
  421  *
  422  * Decrements @atomic by 1.  If the result is 0, returns true and locks
  423  * @lock.  Returns false for all other cases.
  424  */
  425 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
  426 #define atomic_dec_and_lock(atomic, lock) \
  427 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
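/*
 * Editor's sketch (hypothetical object): the classic refcount-release
 * idiom built on atomic_dec_and_lock(). The lock is taken only when
 * the count actually drops to zero, so the common put path stays
 * lock-free.
 *
 *   if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
 *           list_del(&obj->node);          // hypothetical list removal
 *           spin_unlock(&obj_list_lock);
 *           kfree(obj);
 *   }
 */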
  428 
  429 #endif /* __LINUX_SPINLOCK_H */
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error. Please analyze the given error trace and the related source code to understand whether your driver actually contains an error.
The Error trace column shows the path on which the given rule is violated. You can expand or collapse entire entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand or collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking one opens the corresponding line in the source code.
The Source code column shows the content of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking a tab displays that file's content.
| Kernel | Module | Rule | Verifier | Verdict | Status | Creation time | Problem description |
| linux-4.1-rc1.tar.xz | drivers/usb/gadget/udc/amd5536udc.ko | 152_1a | CPAchecker | Bug | Fixed | 2015-09-06 17:46:10 | L0204 |
