Bug
Error #127
Error trace
The trace below begins with the type declarations (typedefs, structs, and enums) gathered from the merged kernel source file; each declaration is prefixed with its line number in the original source.
{ 20 typedef unsigned char __u8; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 30 typedef unsigned long long __u64; 15 typedef signed char s8; 16 typedef unsigned char u8; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 32 typedef __u16 __le16; 34 typedef __u32 __le32; 36 typedef __u64 __le64; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 102 typedef __s32 int32_t; 106 typedef __u8 uint8_t; 107 typedef __u16 uint16_t; 108 typedef __u32 uint32_t; 133 typedef unsigned long sector_t; 134 typedef unsigned long blkcnt_t; 152 typedef u64 dma_addr_t; 157 typedef unsigned int gfp_t; 158 typedef unsigned int fmode_t; 159 typedef unsigned int oom_flags_t; 162 typedef u64 phys_addr_t; 167 typedef phys_addr_t resource_size_t; 177 struct __anonstruct_atomic_t_6 { int counter; } ; 177 typedef struct __anonstruct_atomic_t_6 atomic_t; 182 struct __anonstruct_atomic64_t_7 { long counter; } ; 182 typedef struct __anonstruct_atomic64_t_7 atomic64_t; 183 struct list_head { struct list_head *next; struct list_head *prev; } ; 188 struct hlist_node ; 188 struct hlist_head { struct hlist_node *first; } ; 192 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 203 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 234 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; 5 struct device ; 5 struct page ; 7 struct dma_attrs ; 33 struct module ; 72 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ; 66 struct __anonstruct____missing_field_name_9 { unsigned int a; unsigned int b; } ; 66 struct __anonstruct____missing_field_name_10 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 66 union __anonunion____missing_field_name_8 { struct __anonstruct____missing_field_name_9 __annonCompField4; struct __anonstruct____missing_field_name_10 __annonCompField5; } ; 66 struct desc_struct { union __anonunion____missing_field_name_8 __annonCompField6; } ; 12 typedef unsigned long pteval_t; 13 typedef unsigned long pmdval_t; 
15 typedef unsigned long pgdval_t; 16 typedef unsigned long pgprotval_t; 18 struct __anonstruct_pte_t_11 { pteval_t pte; } ; 18 typedef struct __anonstruct_pte_t_11 pte_t; 20 struct pgprot { pgprotval_t pgprot; } ; 221 typedef struct pgprot pgprot_t; 223 struct __anonstruct_pgd_t_12 { pgdval_t pgd; } ; 223 typedef struct __anonstruct_pgd_t_12 pgd_t; 262 struct __anonstruct_pmd_t_14 { pmdval_t pmd; } ; 262 typedef struct __anonstruct_pmd_t_14 pmd_t; 390 typedef struct page *pgtable_t; 401 struct file ; 414 struct seq_file ; 452 struct thread_struct ; 454 struct mm_struct ; 455 struct task_struct ; 456 struct cpumask ; 20 struct qspinlock { atomic_t val; } ; 33 typedef struct qspinlock arch_spinlock_t; 34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ; 14 typedef struct qrwlock arch_rwlock_t; 420 struct file_operations ; 432 struct completion ; 102 struct vm_area_struct ; 27 union __anonunion___u_16 { struct list_head *__val; char __c[1U]; } ; 200 union __anonunion___u_20 { struct list_head *__val; char __c[1U]; } ; 555 struct timespec ; 556 struct compat_timespec ; 557 struct __anonstruct_futex_32 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ; 557 struct __anonstruct_nanosleep_33 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ; 557 struct pollfd ; 557 struct __anonstruct_poll_34 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ; 557 union __anonunion____missing_field_name_31 { struct __anonstruct_futex_32 futex; struct __anonstruct_nanosleep_33 nanosleep; struct __anonstruct_poll_34 poll; } ; 557 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_31 __annonCompField7; } ; 27 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ; 328 struct cpumask { unsigned long bits[128U]; } ; 15 typedef struct cpumask cpumask_t; 656 typedef struct cpumask *cpumask_var_t; 23 typedef atomic64_t atomic_long_t; 260 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 26 struct __anonstruct____missing_field_name_59 { u64 rip; u64 rdp; } ; 26 struct __anonstruct____missing_field_name_60 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 26 union __anonunion____missing_field_name_58 { struct __anonstruct____missing_field_name_59 __annonCompField13; struct __anonstruct____missing_field_name_60 __annonCompField14; } ; 26 union __anonunion____missing_field_name_61 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_58 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_61 __annonCompField16; } ; 66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ; 214 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ; 220 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ; 235 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ; 252 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; 
union fpregs_state state; } ; 170 struct seq_operations ; 369 struct perf_event ; 370 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fs; unsigned long gs; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; struct fpu fpu; } ; 33 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 28 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct list_head hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 205 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ; 546 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct____missing_field_name_75 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion____missing_field_name_74 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_75 __annonCompField19; } ; 33 struct spinlock { union __anonunion____missing_field_name_74 __annonCompField20; } ; 76 typedef struct spinlock spinlock_t; 23 struct __anonstruct_rwlock_t_76 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 23 typedef struct __anonstruct_rwlock_t_76 rwlock_t; 12 struct __wait_queue ; 12 typedef struct __wait_queue wait_queue_t; 15 struct __wait_queue { unsigned int flags; void *private; int (*func)(wait_queue_t *, unsigned int, int, void *); struct list_head task_list; } ; 38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 43 typedef struct __wait_queue_head wait_queue_head_t; 1221 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ; 52 typedef struct seqcount seqcount_t; 404 struct __anonstruct_seqlock_t_89 { struct seqcount seqcount; spinlock_t lock; } ; 404 typedef struct __anonstruct_seqlock_t_89 seqlock_t; 95 struct __anonstruct_nodemask_t_90 { unsigned long bits[16U]; } ; 95 typedef struct __anonstruct_nodemask_t_90 nodemask_t; 13 struct optimistic_spin_queue { atomic_t tail; } ; 39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ; 67 struct mutex_waiter { struct list_head list; struct task_struct *task; void 
*magic; } ; 177 struct rw_semaphore ; 178 struct rw_semaphore { long count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ; 172 struct completion { unsigned int done; wait_queue_head_t wait; } ; 105 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 446 union ktime { s64 tv64; } ; 41 typedef union ktime ktime_t; 1148 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int slack; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ; 238 struct hrtimer ; 239 enum hrtimer_restart ; 240 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 41 struct rb_root { struct rb_node *rb_node; } ; 838 struct nsproxy ; 259 struct workqueue_struct ; 260 struct work_struct ; 54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 215 struct resource ; 64 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; struct resource *parent; struct resource *sibling; struct resource *child; } ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 335 struct wakeup_source ; 336 struct wake_irq ; 337 struct pm_domain_data ; 338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ; 556 struct dev_pm_qos ; 556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool ignore_children; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char 
run_wake; unsigned char runtime_auto; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ; 616 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ; 25 struct ldt_struct ; 25 struct __anonstruct_mm_context_t_159 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; atomic_t perf_rdpmc_allowed; } ; 25 typedef struct __anonstruct_mm_context_t_159 mm_context_t; 22 struct bio_vec ; 1211 struct llist_node ; 64 struct llist_node { struct llist_node *next; } ; 559 union __anonunion____missing_field_name_189 { unsigned long bitmap[4U]; struct callback_head callback_head; } ; 559 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_189 __annonCompField33; } ; 41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ; 124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ; 167 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ; 199 struct dentry ; 200 struct iattr ; 201 struct super_block ; 202 struct file_system_type ; 203 struct kernfs_open_node ; 204 struct kernfs_iattrs ; 227 struct kernfs_root ; 227 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 85 struct kernfs_node ; 85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 89 struct kernfs_ops ; 89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ; 96 union __anonunion____missing_field_name_194 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_194 __annonCompField34; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ; 155 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 171 struct vm_operations_struct ; 171 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ; 188 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, 
loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 284 struct inode ; 493 struct sock ; 494 struct kobject ; 495 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 501 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 83 struct user_namespace ; 22 struct __anonstruct_kuid_t_197 { uid_t val; } ; 22 typedef struct __anonstruct_kuid_t_197 kuid_t; 27 struct __anonstruct_kgid_t_198 { gid_t val; } ; 27 typedef struct __anonstruct_kgid_t_198 kgid_t; 139 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ; 36 struct bin_attribute ; 37 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 530 struct kref { atomic_t refcount; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 223 struct klist_node ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 68 struct path ; 69 struct seq_file { char *buf; size_t size; size_t 
from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; struct user_namespace *user_ns; void *private; } ; 35 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ; 227 struct pinctrl ; 228 struct pinctrl_state ; 194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ; 48 struct dma_map_ops ; 48 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ; 24 struct device_private ; 25 struct device_driver ; 26 struct driver_private ; 27 struct class ; 28 struct subsys_private ; 29 struct bus_type ; 30 struct device_node ; 31 struct fwnode_handle ; 32 struct iommu_ops ; 33 struct iommu_group ; 61 struct device_attribute ; 61 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ; 139 struct device_type ; 198 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; 204 struct of_device_id ; 204 struct acpi_device_id ; 204 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ; 354 struct class_attribute ; 354 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ; 447 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ; 515 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops 
*pm; } ; 543 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ; 684 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ; 693 struct irq_domain ; 693 struct dma_coherent_mem ; 693 struct cma ; 693 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ; 847 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ; 1320 struct dma_attrs { unsigned long flags[1U]; } ; 69 struct scatterlist ; 58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 73 struct __anonstruct____missing_field_name_204 { struct arch_uprobe_task autask; unsigned long vaddr; } ; 73 struct __anonstruct____missing_field_name_205 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 73 union __anonunion____missing_field_name_203 { struct __anonstruct____missing_field_name_204 __annonCompField37; struct __anonstruct____missing_field_name_205 __annonCompField38; } ; 73 struct uprobe ; 73 struct return_instance ; 73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_203 __annonCompField39; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 94 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ; 110 struct xol_area ; 111 struct uprobes_state { struct xol_area *xol_area; } ; 150 struct address_space ; 151 struct mem_cgroup ; 152 union __anonunion____missing_field_name_206 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ; 152 union __anonunion____missing_field_name_208 { unsigned long index; void *freelist; } ; 152 struct __anonstruct____missing_field_name_212 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 152 union __anonunion____missing_field_name_211 { atomic_t _mapcount; struct 
__anonstruct____missing_field_name_212 __annonCompField42; int units; } ; 152 struct __anonstruct____missing_field_name_210 { union __anonunion____missing_field_name_211 __annonCompField43; atomic_t _count; } ; 152 union __anonunion____missing_field_name_209 { unsigned long counters; struct __anonstruct____missing_field_name_210 __annonCompField44; unsigned int active; } ; 152 struct __anonstruct____missing_field_name_207 { union __anonunion____missing_field_name_208 __annonCompField41; union __anonunion____missing_field_name_209 __annonCompField45; } ; 152 struct dev_pagemap ; 152 struct __anonstruct____missing_field_name_214 { struct page *next; int pages; int pobjects; } ; 152 struct __anonstruct____missing_field_name_215 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ; 152 struct __anonstruct____missing_field_name_216 { unsigned long __pad; pgtable_t pmd_huge_pte; } ; 152 union __anonunion____missing_field_name_213 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_214 __annonCompField47; struct callback_head callback_head; struct __anonstruct____missing_field_name_215 __annonCompField48; struct __anonstruct____missing_field_name_216 __annonCompField49; } ; 152 struct kmem_cache ; 152 union __anonunion____missing_field_name_217 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ; 152 struct page { unsigned long flags; union __anonunion____missing_field_name_206 __annonCompField40; struct __anonstruct____missing_field_name_207 __annonCompField46; union __anonunion____missing_field_name_213 __annonCompField50; union __anonunion____missing_field_name_217 __annonCompField51; struct mem_cgroup *mem_cgroup; } ; 191 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 276 struct userfaultfd_ctx ; 276 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ; 283 struct __anonstruct_shared_218 { struct rb_node rb; unsigned long rb_subtree_last; } ; 283 struct anon_vma ; 283 struct mempolicy ; 283 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_218 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ; 356 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 361 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 375 struct task_rss_stat { int events; int count[4U]; } ; 383 struct mm_rss_stat { atomic_long_t count[4U]; } ; 388 struct kioctx_table ; 389 struct linux_binfmt ; 389 struct mmu_notifier_mm ; 389 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned 
long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; atomic_long_t hugetlb_usage; } ; 591 struct percpu_ref ; 55 typedef void percpu_ref_func_t(struct percpu_ref *); 68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ; 93 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ; 27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ; 41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ; 68 struct file_ra_state ; 69 struct user_struct ; 70 struct writeback_control ; 71 struct bdi_writeback ; 226 struct vm_fault { unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; void *virtual_address; struct page *cow_page; struct page *page; unsigned long max_pgoff; pte_t *pte; } ; 262 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct vm_area_struct *, struct vm_fault *); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ; 1285 struct kvec ; 2365 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ; 21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ; 158 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , struct dma_attrs *); void (*free)(struct device *, size_t , void *, dma_addr_t , struct dma_attrs *); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , struct dma_attrs *); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , struct dma_attrs *); dma_addr_t (*map_page)(struct 
device *, struct page *, unsigned long, size_t , enum dma_data_direction , struct dma_attrs *); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs *); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ; 13 typedef unsigned long kernel_ulong_t; 186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ; 229 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ; 4 typedef unsigned long cputime_t; 25 struct sem_undo_list ; 25 struct sysv_sem { struct sem_undo_list *undo_list; } ; 26 struct sysv_shm { struct list_head shm_clist; } ; 24 struct __anonstruct_sigset_t_234 { unsigned long sig[1U]; } ; 24 typedef struct __anonstruct_sigset_t_234 sigset_t; 25 struct siginfo ; 17 typedef void __signalfn_t(int); 18 typedef __signalfn_t *__sighandler_t; 20 typedef void __restorefn_t(); 21 typedef __restorefn_t *__sigrestore_t; 34 union sigval { int sival_int; void *sival_ptr; } ; 10 typedef union sigval sigval_t; 11 struct __anonstruct__kill_236 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ; 11 struct __anonstruct__timer_237 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ; 11 struct __anonstruct__rt_238 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ; 11 struct __anonstruct__sigchld_239 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ; 11 struct __anonstruct__addr_bnd_241 { void *_lower; void *_upper; } ; 11 struct __anonstruct__sigfault_240 { void *_addr; short _addr_lsb; struct __anonstruct__addr_bnd_241 _addr_bnd; } ; 11 struct __anonstruct__sigpoll_242 { long _band; int _fd; } ; 11 struct __anonstruct__sigsys_243 { void *_call_addr; int _syscall; unsigned int _arch; } ; 11 union __anonunion__sifields_235 { int _pad[28U]; struct __anonstruct__kill_236 _kill; struct __anonstruct__timer_237 _timer; struct __anonstruct__rt_238 _rt; struct __anonstruct__sigchld_239 _sigchld; struct __anonstruct__sigfault_240 _sigfault; struct __anonstruct__sigpoll_242 _sigpoll; struct __anonstruct__sigsys_243 _sigsys; } ; 11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_235 _sifields; } ; 113 typedef struct siginfo siginfo_t; 22 struct sigpending { struct list_head list; sigset_t signal; } ; 242 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ; 256 struct k_sigaction { struct sigaction sa; } ; 442 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 
3 } ; 449 struct pid_namespace ; 449 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 53 struct seccomp_filter ; 54 struct seccomp { int mode; struct seccomp_filter *filter; } ; 40 struct rt_mutex_waiter ; 100 struct timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; int start_pid; void *start_site; char start_comm[16U]; } ; 123 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ; 156 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 466 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ; 45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ; 39 struct assoc_array_ptr ; 39 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ; 31 typedef int32_t key_serial_t; 34 typedef uint32_t key_perm_t; 35 struct key ; 36 struct signal_struct ; 37 struct cred ; 38 struct key_type ; 42 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ; 91 union key_payload { void *rcu_data0; void *data[4U]; } ; 128 union __anonunion____missing_field_name_262 { struct list_head graveyard_link; struct rb_node serial_node; } ; 128 struct key_user ; 128 union __anonunion____missing_field_name_263 { time_t expiry; time_t revoked_at; } ; 128 struct __anonstruct____missing_field_name_265 { struct key_type *type; char *description; } ; 128 union __anonunion____missing_field_name_264 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_265 __annonCompField54; } ; 128 struct __anonstruct____missing_field_name_267 { struct list_head name_link; struct assoc_array keys; } ; 128 union __anonunion____missing_field_name_266 { union key_payload payload; struct __anonstruct____missing_field_name_267 __annonCompField56; int reject_error; } ; 128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_262 __annonCompField52; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_263 __annonCompField53; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_264 __annonCompField55; union __anonunion____missing_field_name_266 
__annonCompField57; } ; 354 struct audit_context ; 27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ; 90 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ; 377 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ; 383 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ; 65 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *fast_read_ctr; struct rw_semaphore rw_sem; atomic_t slow_read_ctr; wait_queue_head_t write_waitq; } ; 54 struct cgroup ; 55 struct cgroup_root ; 56 struct cgroup_subsys ; 57 struct cgroup_taskset ; 100 struct cgroup_file { struct kernfs_node *kn; } ; 89 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; struct callback_head callback_head; struct work_struct destroy_work; } ; 134 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; struct callback_head callback_head; } ; 210 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; unsigned int subtree_control; unsigned int child_subsys_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; int ancestor_ids[]; } ; 294 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ; 333 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct 
cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ; 418 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); void (*css_e_css_changed)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); int early_init; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ; 128 struct futex_pi_state ; 129 struct robust_list_head ; 130 struct bio_list ; 131 struct fs_struct ; 132 struct perf_event_context ; 133 struct blk_plug ; 135 struct nameidata ; 188 struct cfs_rq ; 189 struct task_group ; 482 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ; 523 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ; 531 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ; 538 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ; 563 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ; 579 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ; 601 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ; 646 struct autogroup ; 647 struct tty_struct ; 647 struct taskstats ; 647 struct tty_audit_buf ; 647 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; 
struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; unsigned int audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; oom_flags_t oom_flags; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ; 814 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ; 859 struct backing_dev_info ; 860 struct reclaim_state ; 861 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ; 875 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ; 923 struct wake_q_node { struct wake_q_node *next; } ; 1150 struct io_context ; 1184 struct pipe_inode_info ; 1186 struct load_weight { unsigned long weight; u32 inv_weight; } ; 1193 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ; 1213 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ; 1248 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ; 1285 struct rt_rq ; 1285 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ; 1301 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_new; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ; 1369 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ; 1388 struct sched_class ; 1388 struct files_struct ; 1388 struct compat_robust_list_head ; 1388 struct numa_group ; 1388 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int 
rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; 
seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct thread_struct thread; } ; 19 struct dma_pool ; 1992 struct umc_dev { u16 version; u8 cap_id; u8 bar; struct resource resource; unsigned int irq; struct device dev; } ; 253 struct usb_device_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 bcdUSB; __u8 bDeviceClass; __u8 bDeviceSubClass; __u8 bDeviceProtocol; __u8 bMaxPacketSize0; __le16 idVendor; __le16 idProduct; __le16 bcdDevice; __u8 iManufacturer; __u8 iProduct; __u8 iSerialNumber; __u8 bNumConfigurations; } ; 275 struct usb_config_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wTotalLength; __u8 bNumInterfaces; __u8 bConfigurationValue; __u8 iConfiguration; __u8 bmAttributes; __u8 bMaxPower; } ; 343 struct usb_interface_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bInterfaceNumber; __u8 bAlternateSetting; __u8 bNumEndpoints; __u8 bInterfaceClass; __u8 bInterfaceSubClass; __u8 bInterfaceProtocol; __u8 iInterface; } ; 363 struct usb_endpoint_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bEndpointAddress; __u8 bmAttributes; __le16 wMaxPacketSize; __u8 bInterval; __u8 bRefresh; __u8 bSynchAddress; } ; 613 struct usb_ss_ep_comp_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bMaxBurst; __u8 bmAttributes; __le16 wBytesPerInterval; } ; 704 struct usb_interface_assoc_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bFirstInterface; __u8 bInterfaceCount; __u8 bFunctionClass; __u8 bFunctionSubClass; __u8 bFunctionProtocol; __u8 iFunction; } ; 733 struct usb_key_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 tTKID[3U]; __u8 bReserved; __u8 bKeyData[0U]; } ; 747 struct usb_encryption_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bEncryptionType; __u8 bEncryptionValue; __u8 bAuthKeyIndex; } ; 763 struct usb_bos_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wTotalLength; __u8 bNumDeviceCaps; } ; 785 struct usb_wireless_cap_descriptor { __u8 bLength; __u8 
bDescriptorType; __u8 bDevCapabilityType; __u8 bmAttributes; __le16 wPHYRates; __u8 bmTFITXPowerInfo; __u8 bmFFITXPowerInfo; __le16 bmBandGroup; __u8 bReserved; } ; 813 struct usb_ext_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __le32 bmAttributes; } ; 823 struct usb_ss_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bmAttributes; __le16 wSpeedSupported; __u8 bFunctionalitySupport; __u8 bU1devExitLat; __le16 bU2DevExitLat; } ; 852 struct usb_ss_container_id_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bReserved; __u8 ContainerID[16U]; } ; 867 struct usb_ssp_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bReserved; __le32 bmAttributes; __u16 wFunctionalitySupport; __le16 wReserved; __le32 bmSublinkSpeedAttr[1U]; } ; 946 enum usb_device_speed { USB_SPEED_UNKNOWN = 0, USB_SPEED_LOW = 1, USB_SPEED_FULL = 2, USB_SPEED_HIGH = 3, USB_SPEED_WIRELESS = 4, USB_SPEED_SUPER = 5 } ; 955 enum usb_device_state { USB_STATE_NOTATTACHED = 0, USB_STATE_ATTACHED = 1, USB_STATE_POWERED = 2, USB_STATE_RECONNECTING = 3, USB_STATE_UNAUTHENTICATED = 4, USB_STATE_DEFAULT = 5, USB_STATE_ADDRESS = 6, USB_STATE_CONFIGURED = 7, USB_STATE_SUSPENDED = 8 } ; 967 enum usb3_link_state { USB3_LPM_U0 = 0, USB3_LPM_U1 = 1, USB3_LPM_U2 = 2, USB3_LPM_U3 = 3 } ; 54 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; 16 typedef enum irqreturn irqreturn_t; 468 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ; 81 struct hlist_bl_node ; 81 struct hlist_bl_head { struct hlist_bl_node *first; } ; 36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ; 114 struct __anonstruct____missing_field_name_309 { spinlock_t lock; int count; } ; 114 union __anonunion____missing_field_name_308 { struct __anonstruct____missing_field_name_309 __annonCompField72; } ; 114 struct lockref { union __anonunion____missing_field_name_308 __annonCompField73; } ; 50 struct vfsmount ; 51 struct __anonstruct____missing_field_name_311 { u32 hash; u32 len; } ; 51 union __anonunion____missing_field_name_310 { struct __anonstruct____missing_field_name_311 __annonCompField74; u64 hash_len; } ; 51 struct qstr { union __anonunion____missing_field_name_310 __annonCompField75; const unsigned char *name; } ; 90 struct dentry_operations ; 90 union __anonunion_d_u_312 { struct hlist_node d_alias; struct callback_head d_rcu; } ; 90 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; struct list_head d_lru; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_312 d_u; } ; 142 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path 
*); int (*d_manage)(struct dentry *, bool ); struct inode * (*d_select_inode)(struct dentry *, unsigned int); } ; 586 struct path { struct vfsmount *mnt; struct dentry *dentry; } ; 27 struct list_lru_one { struct list_head list; long nr_items; } ; 32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ; 37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ; 47 struct list_lru { struct list_lru_node *node; struct list_head list; } ; 67 struct __anonstruct____missing_field_name_316 { struct radix_tree_node *parent; void *private_data; } ; 67 union __anonunion____missing_field_name_315 { struct __anonstruct____missing_field_name_316 __annonCompField76; struct callback_head callback_head; } ; 67 struct radix_tree_node { unsigned int path; unsigned int count; union __anonunion____missing_field_name_315 __annonCompField77; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ; 114 struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ; 38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; 47 struct block_device ; 19 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ; 268 struct delayed_call { void (*fn)(void *); void *arg; } ; 162 struct export_operations ; 164 struct iovec ; 165 struct kiocb ; 166 struct poll_table_struct ; 167 struct kstatfs ; 168 struct swap_info_struct ; 169 struct iov_iter ; 76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ; 212 struct dquot ; 19 typedef __kernel_uid32_t projid_t; 23 struct __anonstruct_kprojid_t_322 { projid_t val; } ; 23 typedef struct __anonstruct_kprojid_t_322 kprojid_t; 166 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; 66 typedef long long qsize_t; 67 union __anonunion____missing_field_name_323 { kuid_t uid; kgid_t gid; kprojid_t projid; } ; 67 struct kqid { union __anonunion____missing_field_name_323 __annonCompField79; enum quota_type type; } ; 184 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time_t dqb_btime; time_t dqb_itime; } ; 206 struct quota_format_type ; 207 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ; 272 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ; 299 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); } ; 310 struct dquot_operations { int 
(*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); } ; 325 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ; 348 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ; 394 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ; 405 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ; 418 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ; 432 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ; 496 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ; 526 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ; 367 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *, loff_t ); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int 
(*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ; 424 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; struct list_head private_list; void *private_data; } ; 445 struct request_queue ; 446 struct hd_struct ; 446 struct gendisk ; 446 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; struct list_head bd_inodes; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; int bd_map_count; } ; 565 struct posix_acl ; 566 struct inode_operations ; 566 union __anonunion____missing_field_name_328 { const unsigned int i_nlink; unsigned int __i_nlink; } ; 566 union __anonunion____missing_field_name_329 { struct hlist_head i_dentry; struct callback_head i_rcu; } ; 566 struct file_lock_context ; 566 struct cdev ; 566 union __anonunion____missing_field_name_330 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; } ; 566 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_328 __annonCompField80; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct mutex i_mutex; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; union __anonunion____missing_field_name_329 __annonCompField81; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_330 __annonCompField82; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; void *i_private; } ; 837 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ; 845 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ; 868 union __anonunion_f_u_331 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ; 868 struct file { union __anonunion_f_u_331 f_u; struct path f_path; struct inode *f_inode; const struct file_operations 
*f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ; 953 typedef void *fl_owner_t; 954 struct file_lock ; 955 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ; 961 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ; 988 struct nlm_lockowner ; 989 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ; 14 struct nfs4_lock_state ; 15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ; 19 struct fasync_struct ; 19 struct __anonstruct_afs_333 { struct list_head link; int state; } ; 19 union __anonunion_fl_u_332 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_333 afs; } ; 19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_332 fl_u; } ; 1041 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ; 1244 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ; 1279 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ; 1305 struct super_operations ; 1305 struct xattr_handler ; 1305 struct mtd_info ; 1305 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; 
struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; } ; 1554 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ; 1568 struct dir_context ; 1593 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ; 1600 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ; 1668 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct 
vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ; 1723 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ; 1962 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ; 281 struct usb_device ; 283 struct wusb_dev ; 284 struct ep_device ; 285 struct usb_host_endpoint { struct usb_endpoint_descriptor desc; struct usb_ss_ep_comp_descriptor ss_ep_comp; struct list_head urb_list; void *hcpriv; struct ep_device *ep_dev; unsigned char *extra; int extralen; int enabled; int streams; } ; 77 struct usb_host_interface { struct usb_interface_descriptor desc; int extralen; unsigned char *extra; struct usb_host_endpoint *endpoint; char *string; } ; 92 enum usb_interface_condition { USB_INTERFACE_UNBOUND = 0, USB_INTERFACE_BINDING = 1, USB_INTERFACE_BOUND = 2, USB_INTERFACE_UNBINDING = 3 } ; 99 struct usb_interface { struct usb_host_interface *altsetting; struct usb_host_interface *cur_altsetting; unsigned int num_altsetting; struct usb_interface_assoc_descriptor *intf_assoc; int minor; enum usb_interface_condition condition; unsigned char sysfs_files_created; unsigned char ep_devs_created; unsigned char unregistering; unsigned char needs_remote_wakeup; unsigned char 
needs_altsetting0; unsigned char needs_binding; unsigned char resetting_device; unsigned char authorized; struct device dev; struct device *usb_dev; atomic_t pm_usage_cnt; struct work_struct reset_ws; } ; 204 struct usb_interface_cache { unsigned int num_altsetting; struct kref ref; struct usb_host_interface altsetting[0U]; } ; 259 struct usb_host_config { struct usb_config_descriptor desc; char *string; struct usb_interface_assoc_descriptor *intf_assoc[16U]; struct usb_interface *interface[32U]; struct usb_interface_cache *intf_cache[32U]; unsigned char *extra; int extralen; } ; 323 struct usb_host_bos { struct usb_bos_descriptor *desc; struct usb_ext_cap_descriptor *ext_cap; struct usb_ss_cap_descriptor *ss_cap; struct usb_ssp_cap_descriptor *ssp_cap; struct usb_ss_container_id_descriptor *ss_id; } ; 336 struct usb_devmap { unsigned long devicemap[2U]; } ; 348 struct mon_bus ; 348 struct usb_bus { struct device *controller; int busnum; const char *bus_name; u8 uses_dma; u8 uses_pio_for_control; u8 otg_port; unsigned char is_b_host; unsigned char b_hnp_enable; unsigned char no_stop_on_short; unsigned char no_sg_constraint; unsigned int sg_tablesize; int devnum_next; struct usb_devmap devmap; struct usb_device *root_hub; struct usb_bus *hs_companion; struct list_head bus_list; struct mutex usb_address0_mutex; int bandwidth_allocated; int bandwidth_int_reqs; int bandwidth_isoc_reqs; unsigned int resuming_ports; struct mon_bus *mon_bus; int monitored; } ; 399 struct usb_tt ; 400 enum usb_device_removable { USB_DEVICE_REMOVABLE_UNKNOWN = 0, USB_DEVICE_REMOVABLE = 1, USB_DEVICE_FIXED = 2 } ; 413 struct usb2_lpm_parameters { unsigned int besl; int timeout; } ; 434 struct usb3_lpm_parameters { unsigned int mel; unsigned int pel; unsigned int sel; int timeout; } ; 473 struct usb_device { int devnum; char devpath[16U]; u32 route; enum usb_device_state state; enum usb_device_speed speed; struct usb_tt *tt; int ttport; unsigned int toggle[2U]; struct usb_device *parent; struct usb_bus *bus; struct usb_host_endpoint ep0; struct device dev; struct usb_device_descriptor descriptor; struct usb_host_bos *bos; struct usb_host_config *config; struct usb_host_config *actconfig; struct usb_host_endpoint *ep_in[16U]; struct usb_host_endpoint *ep_out[16U]; char **rawdescriptors; unsigned short bus_mA; u8 portnum; u8 level; unsigned char can_submit; unsigned char persist_enabled; unsigned char have_langid; unsigned char authorized; unsigned char authenticated; unsigned char wusb; unsigned char lpm_capable; unsigned char usb2_hw_lpm_capable; unsigned char usb2_hw_lpm_besl_capable; unsigned char usb2_hw_lpm_enabled; unsigned char usb2_hw_lpm_allowed; unsigned char usb3_lpm_u1_enabled; unsigned char usb3_lpm_u2_enabled; int string_langid; char *product; char *manufacturer; char *serial; struct list_head filelist; int maxchild; u32 quirks; atomic_t urbnum; unsigned long active_duration; unsigned long connect_time; unsigned char do_remote_wakeup; unsigned char reset_resume; unsigned char port_is_suspended; struct wusb_dev *wusb_dev; int slot_id; enum usb_device_removable removable; struct usb2_lpm_parameters l1_params; struct usb3_lpm_parameters u1_params; struct usb3_lpm_parameters u2_params; unsigned int lpm_disable_count; } ; 1206 struct usb_iso_packet_descriptor { unsigned int offset; unsigned int length; unsigned int actual_length; int status; } ; 1248 struct urb ; 1249 struct usb_anchor { struct list_head urb_list; wait_queue_head_t wait; spinlock_t lock; atomic_t suspend_wakeups; unsigned char poisoned; } ; 
1268 struct urb { struct kref kref; void *hcpriv; atomic_t use_count; atomic_t reject; int unlinked; struct list_head urb_list; struct list_head anchor_list; struct usb_anchor *anchor; struct usb_device *dev; struct usb_host_endpoint *ep; unsigned int pipe; unsigned int stream_id; int status; unsigned int transfer_flags; void *transfer_buffer; dma_addr_t transfer_dma; struct scatterlist *sg; int num_mapped_sgs; int num_sgs; u32 transfer_buffer_length; u32 actual_length; unsigned char *setup_packet; dma_addr_t setup_dma; int start_frame; int number_of_packets; int interval; int error_count; void *context; void (*complete)(struct urb *); struct usb_iso_packet_descriptor iso_frame_desc[0U]; } ; 1900 struct giveback_urb_bh { bool running; spinlock_t lock; struct list_head head; struct tasklet_struct bh; struct usb_host_endpoint *completing_ep; } ; 72 struct hc_driver ; 72 struct usb_phy ; 72 struct phy ; 72 struct usb_hcd { struct usb_bus self; struct kref kref; const char *product_desc; int speed; char irq_descr[24U]; struct timer_list rh_timer; struct urb *status_urb; struct work_struct wakeup_work; const struct hc_driver *driver; struct usb_phy *usb_phy; struct phy *phy; unsigned long flags; unsigned char rh_registered; unsigned char rh_pollable; unsigned char msix_enabled; unsigned char remove_phy; unsigned char uses_new_polling; unsigned char wireless; unsigned char has_tt; unsigned char amd_resume_bug; unsigned char can_do_streams; unsigned char tpl_support; unsigned char cant_recv_wakeups; unsigned int irq; void *regs; resource_size_t rsrc_start; resource_size_t rsrc_len; unsigned int power_budget; struct giveback_urb_bh high_prio_bh; struct giveback_urb_bh low_prio_bh; struct mutex *bandwidth_mutex; struct usb_hcd *shared_hcd; struct usb_hcd *primary_hcd; struct dma_pool *pool[4U]; int state; unsigned long hcd_priv[0U]; } ; 233 struct hc_driver { const char *description; const char *product_desc; size_t hcd_priv_size; irqreturn_t (*irq)(struct usb_hcd *); int flags; int (*reset)(struct usb_hcd *); int (*start)(struct usb_hcd *); int (*pci_suspend)(struct usb_hcd *, bool ); int (*pci_resume)(struct usb_hcd *, bool ); void (*stop)(struct usb_hcd *); void (*shutdown)(struct usb_hcd *); int (*get_frame_number)(struct usb_hcd *); int (*urb_enqueue)(struct usb_hcd *, struct urb *, gfp_t ); int (*urb_dequeue)(struct usb_hcd *, struct urb *, int); int (*map_urb_for_dma)(struct usb_hcd *, struct urb *, gfp_t ); void (*unmap_urb_for_dma)(struct usb_hcd *, struct urb *); void (*endpoint_disable)(struct usb_hcd *, struct usb_host_endpoint *); void (*endpoint_reset)(struct usb_hcd *, struct usb_host_endpoint *); int (*hub_status_data)(struct usb_hcd *, char *); int (*hub_control)(struct usb_hcd *, u16 , u16 , u16 , char *, u16 ); int (*bus_suspend)(struct usb_hcd *); int (*bus_resume)(struct usb_hcd *); int (*start_port_reset)(struct usb_hcd *, unsigned int); void (*relinquish_port)(struct usb_hcd *, int); int (*port_handed_over)(struct usb_hcd *, int); void (*clear_tt_buffer_complete)(struct usb_hcd *, struct usb_host_endpoint *); int (*alloc_dev)(struct usb_hcd *, struct usb_device *); void (*free_dev)(struct usb_hcd *, struct usb_device *); int (*alloc_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, unsigned int, gfp_t ); int (*free_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, gfp_t ); int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); int (*drop_endpoint)(struct usb_hcd 
*, struct usb_device *, struct usb_host_endpoint *); int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); int (*address_device)(struct usb_hcd *, struct usb_device *); int (*enable_device)(struct usb_hcd *, struct usb_device *); int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t ); int (*reset_device)(struct usb_hcd *, struct usb_device *); int (*update_device)(struct usb_hcd *, struct usb_device *); int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); int (*enable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state ); int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state ); int (*find_raw_port_number)(struct usb_hcd *, int); int (*port_power)(struct usb_hcd *, int, bool ); } ; 266 struct usb_tt { struct usb_device *hub; int multi; unsigned int think_time; void *hcpriv; spinlock_t lock; struct list_head clear_list; struct work_struct clear_work; } ; 716 struct iovec { void *iov_base; __kernel_size_t iov_len; } ; 21 struct kvec { void *iov_base; size_t iov_len; } ; 27 union __anonunion____missing_field_name_337 { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; } ; 27 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion____missing_field_name_337 __annonCompField83; unsigned long nr_segs; } ; 186 struct pipe_buf_operations ; 186 struct pipe_buffer { struct page *page; unsigned int offset; unsigned int len; const struct pipe_buf_operations *ops; unsigned int flags; unsigned long private; } ; 27 struct pipe_inode_info { struct mutex mutex; wait_queue_head_t wait; unsigned int nrbufs; unsigned int curbuf; unsigned int buffers; unsigned int readers; unsigned int writers; unsigned int files; unsigned int waiting_writers; unsigned int r_counter; unsigned int w_counter; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; } ; 63 struct pipe_buf_operations { int can_merge; int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); void (*release)(struct pipe_inode_info *, struct pipe_buffer *); int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); void (*get)(struct pipe_inode_info *, struct pipe_buffer *); } ; 66 struct uwb_mac_addr { u8 data[6U]; } ; 136 struct uwb_dev_addr { u8 data[2U]; } ; 192 enum uwb_drp_type { UWB_DRP_TYPE_ALIEN_BP = 0, UWB_DRP_TYPE_HARD = 1, UWB_DRP_TYPE_SOFT = 2, UWB_DRP_TYPE_PRIVATE = 3, UWB_DRP_TYPE_PCA = 4 } ; 219 struct uwb_drp_alloc { __le16 zone_bm; __le16 mas_bm; } ; 289 struct uwb_ie_hdr { u8 element_id; u8 length; } ; 300 struct uwb_ie_drp { struct uwb_ie_hdr hdr; __le16 drp_control; struct uwb_dev_addr dev_addr; struct uwb_drp_alloc allocs[]; } ; 394 struct uwb_ie_drp_avail { struct uwb_ie_hdr hdr; unsigned long bmp[4U]; } ; 468 struct uwb_rccb { u8 bCommandType; __le16 wCommand; u8 bCommandContext; } ; 482 struct uwb_rceb { u8 bEventType; __le16 wEvent; u8 bEventContext; } ; 612 struct uwb_rc_cmd_set_ie { struct uwb_rccb rccb; __le16 wIELength; u8 IEData[]; } ; 780 struct uwb_dev ; 781 struct uwb_beca_e ; 782 struct uwb_rc ; 783 struct uwb_rsv ; 784 struct uwb_dbg ; 785 struct uwb_dev { struct mutex mutex; struct list_head list_node; struct device dev; struct uwb_rc *rc; struct uwb_beca_e *bce; struct uwb_mac_addr mac_addr; struct uwb_dev_addr dev_addr; int beacon_slot; unsigned long streams[1U]; unsigned long 
last_availability_bm[4U]; } ; 77 struct uwb_notifs_chain { struct list_head list; struct mutex mutex; } ; 91 struct uwb_beca { struct list_head list; size_t entries; struct mutex mutex; } ; 98 struct uwbd { int pid; struct task_struct *task; wait_queue_head_t wq; struct list_head event_list; spinlock_t event_list_lock; } ; 107 struct uwb_mas_bm { unsigned long bm[4U]; unsigned long unsafe_bm[4U]; int safe; int unsafe; } ; 118 enum uwb_rsv_state { UWB_RSV_STATE_NONE = 0, UWB_RSV_STATE_O_INITIATED = 1, UWB_RSV_STATE_O_PENDING = 2, UWB_RSV_STATE_O_MODIFIED = 3, UWB_RSV_STATE_O_ESTABLISHED = 4, UWB_RSV_STATE_O_TO_BE_MOVED = 5, UWB_RSV_STATE_O_MOVE_EXPANDING = 6, UWB_RSV_STATE_O_MOVE_COMBINING = 7, UWB_RSV_STATE_O_MOVE_REDUCING = 8, UWB_RSV_STATE_T_ACCEPTED = 9, UWB_RSV_STATE_T_DENIED = 10, UWB_RSV_STATE_T_CONFLICT = 11, UWB_RSV_STATE_T_PENDING = 12, UWB_RSV_STATE_T_EXPANDING_ACCEPTED = 13, UWB_RSV_STATE_T_EXPANDING_CONFLICT = 14, UWB_RSV_STATE_T_EXPANDING_PENDING = 15, UWB_RSV_STATE_T_EXPANDING_DENIED = 16, UWB_RSV_STATE_T_RESIZED = 17, UWB_RSV_STATE_LAST = 18 } ; 140 enum uwb_rsv_target_type { UWB_RSV_TARGET_DEV = 0, UWB_RSV_TARGET_DEVADDR = 1 } ; 145 union __anonunion____missing_field_name_361 { struct uwb_dev *dev; struct uwb_dev_addr devaddr; } ; 145 struct uwb_rsv_target { enum uwb_rsv_target_type type; union __anonunion____missing_field_name_361 __annonCompField99; } ; 183 struct uwb_rsv_move { struct uwb_mas_bm final_mas; struct uwb_ie_drp *companion_drp_ie; struct uwb_mas_bm companion_mas; } ; 196 struct uwb_rsv { struct uwb_rc *rc; struct list_head rc_node; struct list_head pal_node; struct kref kref; struct uwb_dev *owner; struct uwb_rsv_target target; enum uwb_drp_type type; int max_mas; int min_mas; int max_interval; bool is_multicast; void (*callback)(struct uwb_rsv *); void *pal_priv; enum uwb_rsv_state state; bool needs_release_companion_mas; u8 stream; u8 tiebreaker; struct uwb_mas_bm mas; struct uwb_ie_drp *drp_ie; struct uwb_rsv_move mv; bool ie_valid; struct timer_list timer; struct work_struct handle_timeout_work; } ; 275 struct uwb_drp_avail { unsigned long global[4U]; unsigned long local[4U]; unsigned long pending[4U]; struct uwb_ie_drp_avail ie; bool ie_valid; } ; 306 struct uwb_drp_backoff_win { u8 window; u8 n; int total_expired; struct timer_list timer; bool can_reserve_extra_mases; } ; 330 struct uwb_rc { struct uwb_dev uwb_dev; int index; u16 version; struct module *owner; void *priv; int (*start)(struct uwb_rc *); void (*stop)(struct uwb_rc *); int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t ); int (*reset)(struct uwb_rc *); int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *); int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t , size_t *, size_t *); spinlock_t neh_lock; struct list_head neh_list; unsigned long ctx_bm[4U]; u8 ctx_roll; int beaconing; int beaconing_forced; int scanning; unsigned char scan_type; unsigned char ready; struct uwb_notifs_chain notifs_chain; struct uwb_beca uwb_beca; struct uwbd uwbd; struct uwb_drp_backoff_win bow; struct uwb_drp_avail drp_avail; struct list_head reservations; struct list_head cnflt_alien_list; struct uwb_mas_bm cnflt_alien_bitmap; struct mutex rsvs_mutex; spinlock_t rsvs_lock; struct workqueue_struct *rsv_workq; struct delayed_work rsv_update_work; struct delayed_work rsv_alien_bp_work; int set_drp_ie_pending; struct mutex ies_mutex; struct uwb_rc_cmd_set_ie *ies; size_t ies_capacity; struct list_head pals; int active_pals; struct uwb_dbg *dbg; } ; 422 struct uwb_pal { struct 
list_head node; const char *name; struct device *device; struct uwb_rc *rc; void (*channel_changed)(struct uwb_pal *, int); void (*new_rsv)(struct uwb_pal *, struct uwb_rsv *); int channel; struct dentry *debugfs_dir; } ; 830 struct wuie_hdr { u8 bLength; u8 bIEIdentifier; } ; 61 struct wusb_ckhdid { u8 data[16U]; } ; 84 struct wuie_host_info { struct wuie_hdr hdr; __le16 attributes; struct wusb_ckhdid CHID; } ; 98 struct __anonstruct_blk_362 { struct wusb_ckhdid CDID; u8 bDeviceAddress; u8 bReserved; } ; 98 struct wuie_connect_ack { struct wuie_hdr hdr; struct __anonstruct_blk_362 blk[4U]; } ; 136 struct wuie_keep_alive { struct wuie_hdr hdr; u8 bDeviceAddress[4U]; } ; 375 struct wusbhc ; 375 struct wusb_dev { struct kref refcnt; struct wusbhc *wusbhc; struct list_head cack_node; struct list_head rekey_node; u8 port_idx; u8 addr; unsigned char beacon_type; struct usb_encryption_descriptor ccm1_etd; struct wusb_ckhdid cdid; unsigned long entry_ts; struct usb_bos_descriptor *bos; struct usb_wireless_cap_descriptor *wusb_cap_descr; struct uwb_mas_bm availability; struct work_struct devconnect_acked_work; struct usb_device *usb_dev; } ; 134 struct wusb_port { u16 status; u16 change; struct wusb_dev *wusb_dev; u32 ptk_tkid; } ; 158 struct wusb_dev_info ; 158 struct __anonstruct_gtk_363 { struct usb_key_descriptor descr; u8 data[16U]; } ; 158 struct wusbhc { struct usb_hcd usb_hcd; struct device *dev; struct uwb_rc *uwb_rc; struct uwb_pal pal; unsigned int trust_timeout; struct wusb_ckhdid chid; uint8_t phy_rate; uint8_t dnts_num_slots; uint8_t dnts_interval; uint8_t retry_count; struct wuie_host_info *wuie_host_info; struct mutex mutex; u16 cluster_id; struct wusb_port *port; struct wusb_dev_info *dev_info; u8 ports_max; unsigned char active; struct wuie_keep_alive keep_alive_ie; struct delayed_work keep_alive_timer; struct list_head cack_list; size_t cack_count; struct wuie_connect_ack cack_ie; struct uwb_rsv *rsv; struct mutex mmcie_mutex; struct wuie_hdr **mmcie; u8 mmcies_max; int (*start)(struct wusbhc *); void (*stop)(struct wusbhc *, int); int (*mmcie_add)(struct wusbhc *, u8 , u8 , u8 , struct wuie_hdr *); int (*mmcie_rm)(struct wusbhc *, u8 ); int (*dev_info_set)(struct wusbhc *, struct wusb_dev *); int (*bwa_set)(struct wusbhc *, s8 , const struct uwb_mas_bm *); int (*set_ptk)(struct wusbhc *, u8 , u32 , const void *, size_t ); int (*set_gtk)(struct wusbhc *, u32 , const void *, size_t ); int (*set_num_dnts)(struct wusbhc *, u8 , u8 ); struct __anonstruct_gtk_363 gtk; u8 gtk_index; u32 gtk_tkid; struct workqueue_struct *wq_security; struct work_struct gtk_rekey_work; struct usb_encryption_descriptor *ccm1_etd; } ; 114 struct whc_qtd { __le32 status; __le32 options; __le64 page_list_ptr; __u8 setup[8U]; } ; 55 struct whc_itd { __le16 presentation_time; __u8 num_segments; __u8 status; __le32 options; __le64 page_list_ptr; __le64 seg_list_ptr; } ; 87 struct whc_page_list_entry { __le64 buf_ptr; } ; 127 union __anonunion_overlay_364 { struct whc_qtd qtd; struct whc_itd itd; } ; 127 struct whc_qhead { __le64 link; __le32 info1; __le32 info2; __le32 info3; __le16 status; __le16 err_count; __le32 cur_window; __le32 scratch[3U]; union __anonunion_overlay_364 overlay; } ; 198 union __anonunion____missing_field_name_365 { struct whc_qtd qtd[8U]; struct whc_itd itd[8U]; } ; 198 struct whc ; 198 struct whc_qset { struct whc_qhead qh; union __anonunion____missing_field_name_365 __annonCompField100; dma_addr_t qset_dma; struct whc *whc; struct usb_host_endpoint *ep; struct list_head stds; int 
ntds; int td_start; int td_end; struct list_head list_node; unsigned char in_sw_list; unsigned char in_hw_list; unsigned char remove; unsigned char reset; struct urb *pause_after_urb; struct completion remove_complete; uint16_t max_packet; uint8_t max_burst; uint8_t max_seq; } ; 275 struct di_buf_entry { __le32 availability_info[8U]; __le32 addr_sec_info; __le32 reserved[7U]; } ; 286 struct dn_buf_entry { __u8 msg_size; __u8 reserved1; __u8 src_addr; __u8 status; __le32 tkid; __u8 dn_data[56U]; } ; 307 struct whc_dbg ; 308 struct whc { struct wusbhc wusbhc; struct umc_dev *umc; resource_size_t base_phys; void *base; int irq; u8 n_devices; u8 n_keys; u8 n_mmc_ies; u64 *pz_list; struct dn_buf_entry *dn_buf; struct di_buf_entry *di_buf; dma_addr_t pz_list_dma; dma_addr_t dn_buf_dma; dma_addr_t di_buf_dma; spinlock_t lock; struct mutex mutex; void *gen_cmd_buf; dma_addr_t gen_cmd_buf_dma; wait_queue_head_t cmd_wq; struct workqueue_struct *workqueue; struct work_struct dn_work; struct dma_pool *qset_pool; struct list_head async_list; struct list_head async_removed_list; wait_queue_head_t async_list_wq; struct work_struct async_work; struct list_head periodic_list[5U]; struct list_head periodic_removed_list; wait_queue_head_t periodic_list_wq; struct work_struct periodic_work; struct whc_dbg *dbg; } ; 77 struct whc_std { struct urb *urb; size_t len; int ntds_remaining; struct whc_qtd *qtd; struct list_head list_node; int num_pointers; dma_addr_t dma_addr; struct whc_page_list_entry *pl_virt; void *bounce_buf; struct scatterlist *bounce_sg; unsigned int bounce_offset; } ; 110 struct whc_urb { struct urb *urb; struct whc_qset *qset; struct work_struct dequeue_work; bool is_async; int status; } ; 135 enum whc_update { WHC_UPDATE_ADDED = 1, WHC_UPDATE_REMOVED = 2, WHC_UPDATE_UPDATED = 4 } ; 214 struct whc_dbg { struct dentry *di_f; struct dentry *asl_f; struct dentry *pzl_f; } ; 99 struct kernel_symbol { unsigned long value; const char *name; } ; 131 typedef void (*ctor_fn_t)(); 555 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 82 struct static_key { atomic_t enabled; } ; 264 struct tracepoint_func { void *func; void *data; int prio; } ; 18 struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 223 struct kernel_param ; 228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 62 struct kparam_string ; 62 struct kparam_array ; 62 union __anonunion____missing_field_name_224 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_224 __annonCompField52; } ; 83 struct kparam_string { unsigned int maxlen; char *string; } ; 89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ; 470 struct latch_tree_node { struct rb_node node[2U]; } ; 211 struct mod_arch_specific { } 
; 38 struct module_param_attrs ; 38 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 48 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 74 struct exception_table_entry ; 290 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 297 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ; 304 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; struct mod_tree_node mtn; } ; 318 struct module_sect_attrs ; 318 struct module_notes_attrs ; 318 struct trace_event_call ; 318 struct trace_enum_map ; 318 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; Elf64_Sym *symtab; Elf64_Sym *core_symtab; unsigned int num_symtab; unsigned int core_num_syms; char *strtab; char *core_strtab; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp_alive; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 14 struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; } ; 72 struct umc_driver { char *name; u8 cap_id; int (*match)(struct umc_driver *, struct umc_dev *); const void *match_data; int (*probe)(struct umc_dev *); void (*remove)(struct umc_dev *); int (*pre_reset)(struct umc_dev *); int (*post_reset)(struct umc_dev *); struct device_driver driver; } ; 62 struct exception_table_entry { int insn; int fixup; } ; 27 union __anonunion___u_16___0 { struct list_head *__val; char __c[1U]; } ; 234 struct _ddebug { const char *modname; const char *function; const char *filename; const char 
*format; unsigned int lineno; unsigned char flags; } ; 27 union __anonunion___u_16___1 { struct list_head *__val; char __c[1U]; } ; 181 struct wusb_dn_hdr { u8 bType; u8 notifdata[]; } ; 27 union __anonunion___u_16___2 { struct list_head *__val; char __c[1U]; } ; 200 union __anonunion___u_20___0 { struct list_head *__val; char __c[1U]; } ; 27 union __anonunion___u_16___3 { struct list_head *__val; char __c[1U]; } ; 890 struct usb_wireless_ep_comp_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bMaxBurst; __u8 bMaxSequence; __le16 wMaxStreamDelay; __le16 wOverTheAirPacketSize; __u8 bOverTheAirInterval; __u8 bmCompAttributes; } ; 38 typedef int Set; 1 long int __builtin_expect(long, long); 1 void * __builtin_memcpy(void *, const void *, unsigned long); 216 void __read_once_size(const volatile void *p, void *res, int size); 241 void __write_once_size(volatile void *p, void *res, int size); 178 void __might_sleep(const char *, int, int); 25 void INIT_LIST_HEAD(struct list_head *list); 48 void __list_add(struct list_head *, struct list_head *, struct list_head *); 61 void list_add(struct list_head *new, struct list_head *head); 112 void __list_del_entry(struct list_head *); 113 void list_del(struct list_head *); 165 void list_move(struct list_head *list, struct list_head *head); 198 int list_empty(const struct list_head *head); 31 void _raw_spin_lock_irq(raw_spinlock_t *); 34 unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *); 43 void _raw_spin_unlock_irq(raw_spinlock_t *); 45 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long); 289 raw_spinlock_t * spinlock_check(spinlock_t *lock); 330 void spin_lock_irq(spinlock_t *lock); 355 void spin_unlock_irq(spinlock_t *lock); 360 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags); 977 long int prepare_to_wait_event(wait_queue_head_t *, wait_queue_t *, int); 978 void finish_wait(wait_queue_head_t *, wait_queue_t *); 138 void mutex_lock_nested(struct mutex *, unsigned int); 174 void mutex_unlock(struct mutex *); 292 unsigned long int __msecs_to_jiffies(const unsigned int); 354 unsigned long int msecs_to_jiffies(const unsigned int m); 429 bool queue_work_on(int, struct workqueue_struct *, struct work_struct *); 469 bool queue_work(struct workqueue_struct *wq, struct work_struct *work); 31 unsigned int ioread32(void *); 37 void iowrite32(u32 , void *); 426 long int schedule_timeout(long); 412 int usb_hcd_link_urb_to_ep(struct usb_hcd *, struct urb *); 413 int usb_hcd_check_unlink_urb(struct usb_hcd *, struct urb *, int); 415 void usb_hcd_unlink_urb_from_ep(struct usb_hcd *, struct urb *); 88 u32 le_readl(void *addr); 96 void le_writeq(u64 value, void *addr); 113 int whci_wait_for(struct device *, u32 *, u32 , u32 , unsigned long, const char *); 268 void whc_qset_set_link_ptr(u64 *ptr, u64 target); 147 void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val); 149 void whc_hw_error(struct whc *whc, const char *reason); 171 void asl_start(struct whc *whc); 172 void asl_stop(struct whc *whc); 173 int asl_init(struct whc *whc); 174 void asl_clean_up(struct whc *whc); 175 int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags); 176 int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status); 177 void asl_qset_delete(struct whc *whc, struct whc_qset *qset); 178 void scan_async_work(struct work_struct *work); 191 struct whc_qset * qset_alloc(struct whc *whc, gfp_t mem_flags); 192 void qset_free(struct whc *whc, struct whc_qset *qset); 193 struct whc_qset * get_qset(struct whc *whc, struct urb *urb, 
gfp_t mem_flags); 194 void qset_delete(struct whc *whc, struct whc_qset *qset); 195 void qset_clear(struct whc *whc, struct whc_qset *qset); 196 void qset_reset(struct whc *whc, struct whc_qset *qset); 197 int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, gfp_t mem_flags); 199 void qset_free_std(struct whc *whc, struct whc_std *std); 200 void qset_remove_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, int status); 202 void process_halted_qtd(struct whc *whc, struct whc_qset *qset, struct whc_qtd *qtd); 204 void process_inactive_qtd(struct whc *whc, struct whc_qset *qset, struct whc_qtd *qtd); 206 enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset); 207 void qset_remove_complete(struct whc *whc, struct whc_qset *qset); 209 void asl_update(struct whc *whc, uint32_t wusbcmd); 28 void qset_get_next_prev(struct whc *whc, struct whc_qset *qset, struct whc_qset **next, struct whc_qset **prev); 47 void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset); 53 void asl_qset_insert(struct whc *whc, struct whc_qset *qset); 66 void asl_qset_remove(struct whc *whc, struct whc_qset *qset); 97 uint32_t process_qset(struct whc *whc, struct whc_qset *qset); 33 extern struct module __this_module; 148 void kfree(const void *); 312 void * __kmalloc(size_t , gfp_t ); 451 void * kmalloc(size_t size, gfp_t flags); 605 void * kzalloc(size_t size, gfp_t flags); 114 ssize_t seq_read(struct file *, char *, size_t , loff_t *); 115 loff_t seq_lseek(struct file *, loff_t , int); 122 void seq_printf(struct seq_file *, const char *, ...); 140 int single_open(struct file *, int (*)(struct seq_file *, void *), void *); 142 int single_release(struct inode *, struct file *); 49 struct dentry * debugfs_create_file(const char *, umode_t , struct dentry *, void *, const struct file_operations *); 68 void debugfs_remove(struct dentry *); 212 void whc_dbg_init(struct whc *whc); 213 void whc_dbg_clean_up(struct whc *whc); 35 void qset_print(struct seq_file *s, struct whc_qset *qset); 87 int di_print(struct seq_file *s, void *p); 107 int asl_print(struct seq_file *s, void *p); 119 int pzl_print(struct seq_file *s, void *p); 134 int di_open(struct inode *inode, struct file *file); 139 int asl_open(struct inode *inode, struct file *file); 144 int pzl_open(struct inode *inode, struct file *file); 149 const struct file_operations di_fops = { &__this_module, &seq_lseek, &seq_read, 0, 0, 0, 0, 0, 0, 0, 0, &di_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 157 const struct file_operations asl_fops = { &__this_module, &seq_lseek, &seq_read, 0, 0, 0, 0, 0, 0, 0, 0, &asl_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 165 const struct file_operations pzl_fops = { &__this_module, &seq_lseek, &seq_read, 0, 0, 0, 0, 0, 0, 0, 0, &pzl_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 219 void ldv_check_final_state(); 222 void ldv_check_return_value(int); 228 void ldv_initialize(); 231 void ldv_handler_precall(); 234 int nondet_int(); 237 int LDV_IN_INTERRUPT = 0; 240 void ldv_main1_sequence_infinite_withcheck_stateful(); 72 void set_bit(long nr, volatile unsigned long *addr); 100 int device_wakeup_enable(struct device *); 897 void * dev_get_drvdata(const struct device *dev); 1120 void dev_err(const struct device *, const char *, ...); 1122 void dev_warn(const struct device *, const char *, ...); 104 int __umc_driver_register(struct umc_driver *, struct module *, const char *); 115 void 
umc_driver_unregister(struct umc_driver *); 471 int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd); 484 int usb_endpoint_xfer_control(const struct usb_endpoint_descriptor *epd); 438 struct usb_hcd * usb_create_hcd(const struct hc_driver *, struct device *, const char *); 444 void usb_put_hcd(struct usb_hcd *); 446 int usb_add_hcd(struct usb_hcd *, unsigned int, unsigned long); 448 void usb_remove_hcd(struct usb_hcd *); 508 struct uwb_rc * uwb_rc_get_by_grandpa(const struct device *); 509 void uwb_rc_put(struct uwb_rc *); 309 int wusbhc_create(struct wusbhc *); 310 int wusbhc_b_create(struct wusbhc *); 311 void wusbhc_b_destroy(struct wusbhc *); 312 void wusbhc_destroy(struct wusbhc *); 407 int wusbhc_rh_status_data(struct usb_hcd *, char *); 408 int wusbhc_rh_control(struct usb_hcd *, u16 , u16 , u16 , char *, u16 ); 409 int wusbhc_rh_start_port_reset(struct usb_hcd *, unsigned int); 444 u8 wusb_cluster_id_get(); 445 void wusb_cluster_id_put(u8 ); 80 void le_writel(u32 value, void *addr); 143 int whc_init(struct whc *whc); 144 void whc_clean_up(struct whc *whc); 152 int whc_wusbhc_start(struct wusbhc *wusbhc); 153 void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay); 154 int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, u8 handle, struct wuie_hdr *wuie); 156 int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle); 157 int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm); 158 int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev); 159 int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots); 160 int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, const void *ptk, size_t key_size); 162 int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid, const void *gtk, size_t key_size); 164 int whc_set_cluster_id(struct whc *whc, u8 bcid); 167 irqreturn_t whc_int_handler(struct usb_hcd *hcd); 185 int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags); 186 int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status); 187 void pzl_qset_delete(struct whc *whc, struct whc_qset *qset); 33 int whc_reset(struct usb_hcd *usb_hcd); 45 int whc_start(struct usb_hcd *usb_hcd); 89 void whc_stop(struct usb_hcd *usb_hcd); 108 int whc_get_frame_number(struct usb_hcd *usb_hcd); 118 int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, gfp_t mem_flags); 146 int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status); 173 void whc_endpoint_disable(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep); 191 void whc_endpoint_reset(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep); 217 struct hc_driver whc_hc_driver = { "whci-hcd", "Wireless host controller", 2008UL, &whc_int_handler, 32, &whc_reset, &whc_start, 0, 0, &whc_stop, 0, &whc_get_frame_number, &whc_urb_enqueue, &whc_urb_dequeue, 0, 0, &whc_endpoint_disable, &whc_endpoint_reset, &wusbhc_rh_status_data, &wusbhc_rh_control, 0, 0, &wusbhc_rh_start_port_reset, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 238 int whc_probe(struct umc_dev *umc); 322 void whc_remove(struct umc_dev *umc); 339 struct umc_driver whci_hc_driver = { (char *)"whci-hcd", 1U, 0, 0, &whc_probe, &whc_remove, 0, 0, { 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; 346 int whci_hc_driver_init(); 352 void whci_hc_driver_exit(); 363 const struct pci_device_id __mod_pci__whci_hcd_id_table_device_table[2U] = { }; 391 void ldv_check_return_value_probe(int); 406 void ldv_main2_sequence_infinite_withcheck_stateful(); 31 void * 
__memcpy(void *, const void *, size_t ); 25 void INIT_LIST_HEAD___0(struct list_head *list); 322 void wusbhc_reset_all(struct wusbhc *); 148 int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len); 53 void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...); 25 void INIT_LIST_HEAD___1(struct list_head *list); 56 void * __memset(void *, int, size_t ); 280 void lockdep_init_map(struct lockdep_map *, const char *, struct lock_class_key *, int); 93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *); 72 void __init_waitqueue_head(wait_queue_head_t *, const char *, struct lock_class_key *); 119 void __mutex_init(struct mutex *, const char *, struct lock_class_key *); 181 void __init_work(struct work_struct *, int); 361 struct workqueue_struct * __alloc_workqueue_key(const char *, unsigned int, int, struct lock_class_key *, const char *, ...); 421 void destroy_workqueue(struct workqueue_struct *); 140 extern struct resource iomem_resource; 165 resource_size_t resource_size(const struct resource *res); 193 struct resource * __request_region(struct resource *, resource_size_t , resource_size_t , const char *, int); 202 void __release_region(struct resource *, resource_size_t , resource_size_t ); 181 void * ioremap_nocache(resource_size_t , unsigned long); 192 void * ioremap(resource_size_t offset, unsigned long size); 197 void iounmap(volatile void *); 87 const char * kobject_name(const struct kobject *kobj); 850 const char * dev_name(const struct device *dev); 53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *); 29 extern struct dma_map_ops *dma_ops; 31 struct dma_map_ops * get_dma_ops(struct device *dev); 43 bool arch_dma_alloc_attrs(struct device **, gfp_t *); 355 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs); 396 void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); 402 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle); 19 struct dma_pool * dma_pool_create(const char *, struct device *, size_t , size_t , size_t ); 22 void dma_pool_destroy(struct dma_pool *); 168 void whc_dn_work(struct work_struct *work); 181 int pzl_init(struct whc *whc); 182 void pzl_clean_up(struct whc *whc); 188 void scan_periodic_work(struct work_struct *work); 30 void whc_hw_reset(struct whc *whc); 37 void whc_hw_init_di_buf(struct whc *whc); 48 void whc_hw_init_dn_buf(struct whc *whc); 200 void __wake_up(wait_queue_head_t *, unsigned int, int, void *); 395 void wusbhc_handle_dn(struct wusbhc *, u8 , struct wusb_dn_hdr *, size_t ); 25 void transfer_done(struct whc *whc); 67 int process_dn_buf(struct whc *whc); 25 void INIT_LIST_HEAD___2(struct list_head *list); 198 int list_empty___0(const struct list_head *head); 183 void pzl_start(struct whc *whc); 184 void pzl_stop(struct whc *whc); 208 void pzl_update(struct whc *whc, uint32_t wusbcmd); 28 void update_pzl_pointers(struct whc *whc, int period, u64 addr); 65 int qset_get_period(struct whc *whc, struct whc_qset *qset); 76 void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset); 87 void pzl_qset_remove(struct whc *whc, struct whc_qset *qset); 103 enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset); 207 void update_pzl_hw_view(struct whc *whc); 7 dma_addr_t ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs 
*attrs); 8 int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); 437 int fls(int x); 25 void INIT_LIST_HEAD___3(struct list_head *list); 75 void list_add_tail(struct list_head *new, struct list_head *head); 154 void list_del_init(struct list_head *entry); 22 void _raw_spin_lock(raw_spinlock_t *); 41 void _raw_spin_unlock(raw_spinlock_t *); 300 void spin_lock(spinlock_t *lock); 345 void spin_unlock(spinlock_t *lock); 73 void init_completion(struct completion *x); 91 void wait_for_completion(struct completion *); 106 void complete(struct completion *); 44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool ); 926 void * lowmem_page_address(const struct page *page); 120 struct page * sg_page(struct scatterlist *sg); 239 void * sg_virt(struct scatterlist *sg); 246 struct scatterlist * sg_next(struct scatterlist *); 77 int valid_dma_direction(int dma_direction); 147 dma_addr_t ldv_dma_map_single_attrs_33(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); 156 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); 461 int ldv_dma_mapping_error_34(struct device *dev, dma_addr_t dma_addr); 147 void * krealloc(const void *, size_t , gfp_t ); 24 void * dma_pool_alloc(struct dma_pool *, gfp_t , dma_addr_t *); 27 void * dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle); 33 void dma_pool_free(struct dma_pool *, void *, dma_addr_t ); 320 void wusbhc_giveback_urb(struct wusbhc *, struct urb *, int); 468 u8 wusb_port_no_to_idx(u8 port_no); 188 unsigned int usb_pipe_to_qh_type(unsigned int pipe); 131 bool whc_std_last(struct whc_std *std); 52 void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb); 254 void qset_remove_qtd(struct whc *whc, struct whc_qset *qset); 263 void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std); 330 void qset_remove_qtds(struct whc *whc, struct whc_qset *qset, struct urb *urb); 347 void qset_free_stds(struct whc_qset *qset, struct urb *urb); 357 int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags); 395 void urb_dequeue_work(struct work_struct *work); 416 struct whc_std * qset_new_std(struct whc *whc, struct whc_qset *qset, struct urb *urb, gfp_t mem_flags); 434 int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb, gfp_t mem_flags); 553 int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset, struct urb *urb, gfp_t mem_flags); 726 int get_urb_status_from_qtd(struct urb *urb, u32 status); 201 void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits); 46 void msleep(unsigned int); 271 void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas); 25 int whc_update_di(struct whc *whc, int idx); 141 int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid, const void *key, size_t key_size, bool is_gtk); 10 void ldv_error(); 25 int ldv_undef_int(); 26 void * ldv_undef_ptr(); 8 int LDV_DMA_MAP_CALLS = 0; 11 dma_addr_t ldv_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir); 41 dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir); return ; } { 408 struct usb_hcd *var_group1; 409 struct urb *var_group2; 410 unsigned int var_whc_urb_enqueue_4_p2; 411 int var_whc_urb_dequeue_5_p2; 412 struct usb_host_endpoint *var_group3; 413 struct umc_dev *var_group4; 414 int 
res_whc_probe_8; 415 int ldv_s_whci_hc_driver_umc_driver; 416 int tmp; 417 int tmp___0; 418 int tmp___1; 476 ldv_s_whci_hc_driver_umc_driver = 0; 456 LDV_IN_INTERRUPT = 1; 471 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { 348 int tmp; 348 tmp = __umc_driver_register(&whci_hc_driver, &__this_module, "whci_hcd") { /* Function call is skipped due to function is undefined */} } 479 goto ldv_39430; 479 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */} 482 goto ldv_39429; 480 ldv_39429:; 483 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 483 switch (tmp___0); 561 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { } 120 struct wusbhc *wusbhc; 121 const struct usb_hcd *__mptr; 122 struct whc *whc; 123 const struct wusbhc *__mptr___0; 124 int ret; 121 __mptr = (const struct usb_hcd *)usb_hcd; 121 wusbhc = (struct wusbhc *)__mptr; 122 __mptr___0 = (const struct wusbhc *)wusbhc; 122 whc = (struct whc *)__mptr___0; 125 switch ((urb->pipe) >> 30); 133 skipped uneccesary edges { } 267 struct whc_qset *qset; 268 int err; 269 unsigned long flags; 270 raw_spinlock_t *tmp; 271 flags = _raw_spin_lock_irqsave(tmp) { /* Function call is skipped due to function is undefined */} 273 err = usb_hcd_link_urb_to_ep(&(whc->wusbhc.usb_hcd), urb) { /* Function call is skipped due to function is undefined */} { 159 struct whc_qset *qset; 162 struct whc_qset *__CPAchecker_TMP_0 = (struct whc_qset *)(urb->ep->hcpriv); 162 qset = __CPAchecker_TMP_0; } { } 640 struct whc_urb *wurb; 641 int remaining; 642 unsigned long long transfer_dma; 643 int ntds_remaining; 644 int ret; 645 void *tmp; 646 struct lock_class_key __key; 647 struct __anonstruct_atomic64_t_7 __constr_expr_0; 648 struct whc_std *std; 649 unsigned long std_len; 650 int tmp___0; 642 int __CPAchecker_TMP_0 = (int)(urb->transfer_buffer_length); 642 remaining = __CPAchecker_TMP_0; 643 transfer_dma = urb->transfer_dma; { 607 void *tmp; { } 453 void *tmp___2; 468 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */} } 647 wurb = (struct whc_urb *)tmp; 650 urb->hcpriv = (void *)wurb; 651 wurb->qset = qset; 652 wurb->urb = urb; 653 __init_work(&(wurb->dequeue_work), 0) { /* Function call is skipped due to function is undefined */} 653 __constr_expr_0.counter = 137438953408L; 653 wurb->dequeue_work.data = __constr_expr_0; 653 lockdep_init_map(&(wurb->dequeue_work.lockdep_map), "(&wurb->dequeue_work)", &__key, 0) { /* Function call is skipped due to function is undefined */} { 27 union __anonunion___u_16___3 __u; 27 __u.__val = list; 28 list->prev = list; } 653 wurb->dequeue_work.func = &urb_dequeue_work; { } 437 unsigned long remaining; 438 struct scatterlist *sg; 439 int i; 440 int ntds; 441 struct whc_std *std; 442 struct whc_page_list_entry *new_pl_virt; 443 unsigned long long prev_end; 444 unsigned long pl_len; 445 int p; 446 unsigned long long dma_addr; 447 unsigned long dma_remaining; 448 unsigned long long sp; 449 unsigned long long ep; 450 int num_pointers; 451 unsigned long __min1; 452 unsigned long __min2; 453 unsigned long dma_len; 454 void *tmp; 455 const struct list_head *__mptr; 456 int tmp___0; 457 const struct list_head *__mptr___0; 440 ntds = 0; 441 std = (struct whc_std *)0; 443 prev_end = 0ULL; 445 p = 0; 447 size_t __CPAchecker_TMP_0 = (size_t )(urb->transfer_buffer_length); 447 remaining = __CPAchecker_TMP_0; 449 i = 0; 449 sg = urb->sg; 449 goto ldv_38588; 
455 ldv_38576:; 535 __mptr = (const struct list_head *)(qset->stds.next); 535 std = ((struct whc_std *)__mptr) + 18446744073709551584UL; 535 goto ldv_38594; 537 goto ldv_38593; 536 ldv_38593:; 537 unsigned long __CPAchecker_TMP_9 = (unsigned long)(std->num_pointers); 537 pl_len = __CPAchecker_TMP_9 * 8UL; 538 tmp___0 = ntds; 538 ntds = ntds - 1; 538 std->ntds_remaining = tmp___0; 539 void *__CPAchecker_TMP_10 = (void *)(std->pl_virt); 539 -ldv_dma_map_single_attrs_33(whc->wusbhc.dev, __CPAchecker_TMP_10, pl_len, 1, (struct dma_attrs *)0) { 38 unsigned long long tmp; { } 58 unsigned long long nonedetermined; 59 void *tmp; 58 tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 58 nonedetermined = (dma_addr_t )tmp; 63 LDV_DMA_MAP_CALLS = LDV_DMA_MAP_CALLS + 1; } 535 __mptr___0 = (const struct list_head *)(std->list_node.next); 535 std = ((struct whc_std *)__mptr___0) + 18446744073709551584UL; 536 ldv_38594:; 537 goto ldv_38593; 536 ldv_38593:; 537 unsigned long __CPAchecker_TMP_9 = (unsigned long)(std->num_pointers); 537 pl_len = __CPAchecker_TMP_9 * 8UL; 538 tmp___0 = ntds; 538 ntds = ntds - 1; 538 std->ntds_remaining = tmp___0; 539 void *__CPAchecker_TMP_10 = (void *)(std->pl_virt); 539 -ldv_dma_map_single_attrs_33(whc->wusbhc.dev, __CPAchecker_TMP_10, pl_len, 1, (struct dma_attrs *)0) { } 38 unsigned long long tmp; { } 58 unsigned long long nonedetermined; 59 void *tmp; 58 tmp = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 58 nonedetermined = (dma_addr_t )tmp; } | Source code 1 /*
2 * Wireless Host Controller (WHC) asynchronous schedule management.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18 #include <linux/kernel.h>
19 #include <linux/gfp.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/uwb/umc.h>
22 #include <linux/usb.h>
23
24 #include "../../wusbcore/wusbhc.h"
25
26 #include "whcd.h"
27
28 static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset,
29 struct whc_qset **next, struct whc_qset **prev)
30 {
31 struct list_head *n, *p;
32
33 BUG_ON(list_empty(&whc->async_list));
34
35 n = qset->list_node.next;
36 if (n == &whc->async_list)
37 n = n->next;
38 p = qset->list_node.prev;
39 if (p == &whc->async_list)
40 p = p->prev;
41
42 *next = container_of(n, struct whc_qset, list_node);
43 *prev = container_of(p, struct whc_qset, list_node);
44
45 }
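The head-skipping in qset_get_next_prev() is easiest to see with a concrete list: the ASL is circular from the hardware's point of view, but the software list head sits in the chain and must never be handed to the hardware as a neighbour. A short worked example (hypothetical qsets, not taken from this report):

/*
 * Hypothetical ASL contents: qsets A, B, C in software-list order.
 * For qset C, list_node.next is &whc->async_list (the head), so the head
 * is skipped and *next becomes A, while *prev becomes B.  The head has no
 * qset_dma of its own, which is why it can never end up in a qset's
 * hardware link pointer.
 */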
46
47 static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset)
48 {
49 list_move(&qset->list_node, &whc->async_list);
50 qset->in_sw_list = true;
51 }
52
53 static void asl_qset_insert(struct whc *whc, struct whc_qset *qset)
54 {
55 struct whc_qset *next, *prev;
56
57 qset_clear(whc, qset);
58
59 /* Link into ASL. */
60 qset_get_next_prev(whc, qset, &next, &prev);
61 whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma);
62 whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma);
63 qset->in_hw_list = true;
64 }
65
66 static void asl_qset_remove(struct whc *whc, struct whc_qset *qset)
67 {
68 struct whc_qset *prev, *next;
69
70 qset_get_next_prev(whc, qset, &next, &prev);
71
72 list_move(&qset->list_node, &whc->async_removed_list);
73 qset->in_sw_list = false;
74
75 /*
76 * No more qsets in the ASL? The caller must stop the ASL as
77 * it's no longer valid.
78 */
79 if (list_empty(&whc->async_list))
80 return;
81
82 /* Remove from ASL. */
83 whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma);
84 qset->in_hw_list = false;
85 }
86
87 /**
88 * process_qset - process any recently inactivated or halted qTDs in a
89 * qset.
90 *
91 * After inactive qTDs are removed, new qTDs can be added if the
92 * urb queue still contains URBs.
93 *
94 * Returns any additional WUSBCMD bits for the ASL sync command (i.e.,
95 * WUSBCMD_ASYNC_QSET_RM if a halted qset was removed).
96 */
97 static uint32_t process_qset(struct whc *whc, struct whc_qset *qset)
98 {
99 enum whc_update update = 0;
100 uint32_t status = 0;
101
102 while (qset->ntds) {
103 struct whc_qtd *td;
104 int t;
105
106 t = qset->td_start;
107 td = &qset->qtd[qset->td_start];
108 status = le32_to_cpu(td->status);
109
110 /*
111 * Nothing to do with a still active qTD.
112 */
113 if (status & QTD_STS_ACTIVE)
114 break;
115
116 if (status & QTD_STS_HALTED) {
117 /* Ug, an error. */
118 process_halted_qtd(whc, qset, td);
119 /* A halted qTD always triggers an update
120 because the qset was either removed or
121 reactivated. */
122 update |= WHC_UPDATE_UPDATED;
123 goto done;
124 }
125
126 /* Mmm, a completed qTD. */
127 process_inactive_qtd(whc, qset, td);
128 }
129
130 if (!qset->remove)
131 update |= qset_add_qtds(whc, qset);
132
133 done:
134 /*
135 * Remove this qset from the ASL if requested, but only if it has
136 * no qTDs.
137 */
138 if (qset->remove && qset->ntds == 0) {
139 asl_qset_remove(whc, qset);
140 update |= WHC_UPDATE_REMOVED;
141 }
142 return update;
143 }
144
145 void asl_start(struct whc *whc)
146 {
147 struct whc_qset *qset;
148
149 qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
150
151 le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR);
152
153 whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN);
154 whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
155 WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED,
156 1000, "start ASL");
157 }
158
159 void asl_stop(struct whc *whc)
160 {
161 whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0);
162 whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
163 WUSBSTS_ASYNC_SCHED, 0,
164 1000, "stop ASL");
165 }
166
167 /**
168 * asl_update - request an ASL update and wait for the hardware to be synced
169 * @whc: the WHCI HC
170 * @wusbcmd: WUSBCMD value to start the update.
171 *
172 * If the WUSB HC is inactive (i.e., the ASL is stopped) then the
173 * update must be skipped as the hardware may not respond to update
174 * requests.
175 */
176 void asl_update(struct whc *whc, uint32_t wusbcmd)
177 {
178 struct wusbhc *wusbhc = &whc->wusbhc;
179 long t;
180
181 mutex_lock(&wusbhc->mutex);
182 if (wusbhc->active) {
183 whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
184 t = wait_event_timeout(
185 whc->async_list_wq,
186 (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0,
187 msecs_to_jiffies(1000));
188 if (t == 0)
189 whc_hw_error(whc, "ASL update timeout");
190 }
191 mutex_unlock(&wusbhc->mutex);
192 }
193
194 /**
195 * scan_async_work - scan the ASL for qsets to process.
196 *
197 * Process each qset in the ASL in turn and then signal the WHC that
198 * the ASL has been updated.
199 *
200 * Then start, stop or update the asynchronous schedule as required.
201 */
202 void scan_async_work(struct work_struct *work)
203 {
204 struct whc *whc = container_of(work, struct whc, async_work);
205 struct whc_qset *qset, *t;
206 enum whc_update update = 0;
207
208 spin_lock_irq(&whc->lock);
209
210 /*
211 * Traverse the software list backwards so new qsets can be
212 * safely inserted into the ASL without making it non-circular.
213 */
214 list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) {
215 if (!qset->in_hw_list) {
216 asl_qset_insert(whc, qset);
217 update |= WHC_UPDATE_ADDED;
218 }
219
220 update |= process_qset(whc, qset);
221 }
222
223 spin_unlock_irq(&whc->lock);
224
225 if (update) {
226 uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB;
227 if (update & WHC_UPDATE_REMOVED)
228 wusbcmd |= WUSBCMD_ASYNC_QSET_RM;
229 asl_update(whc, wusbcmd);
230 }
231
232 /*
233 * Now that the ASL is updated, complete the removal of any
234 * removed qsets.
235 *
236 * If the qset was to be reset, do so and reinsert it into the
237 * ASL if it has pending transfers.
238 */
239 spin_lock_irq(&whc->lock);
240
241 list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
242 qset_remove_complete(whc, qset);
243 if (qset->reset) {
244 qset_reset(whc, qset);
245 if (!list_empty(&qset->stds)) {
246 asl_qset_insert_begin(whc, qset);
247 queue_work(whc->workqueue, &whc->async_work);
248 }
249 }
250 }
251
252 spin_unlock_irq(&whc->lock);
253 }
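scan_async_work() runs from whc->workqueue; the listing shows the work being kicked with queue_work() but not where the work item is bound to this handler. A minimal sketch of the assumed setup (the real code is in whc_init()/whc_clean_up(), which are only declared in this report, not listed):

/* Assumed initialization, for illustration only -- whc_init() is not listed here. */
INIT_WORK(&whc->async_work, scan_async_work);
whc->workqueue = create_singlethread_workqueue("whci-hcd");   /* queue name is illustrative */
if (whc->workqueue == NULL)
        return -ENOMEM;
/* ...and on the tear-down path, after the HCD has been removed: */
destroy_workqueue(whc->workqueue);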
254
255 /**
256 * asl_urb_enqueue - queue an URB onto the asynchronous list (ASL).
257 * @whc: the WHCI host controller
258 * @urb: the URB to enqueue
259 * @mem_flags: flags for any memory allocations
260 *
261 * The qset for the endpoint is obtained and the urb queued on to it.
262 *
263 * Work is scheduled to update the hardware's view of the ASL.
264 */
265 int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
266 {
267 struct whc_qset *qset;
268 int err;
269 unsigned long flags;
270
271 spin_lock_irqsave(&whc->lock, flags);
272
273 err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
274 if (err < 0) {
275 spin_unlock_irqrestore(&whc->lock, flags);
276 return err;
277 }
278
279 qset = get_qset(whc, urb, GFP_ATOMIC);
280 if (qset == NULL)
281 err = -ENOMEM;
282 else
283 err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
284 if (!err) {
285 if (!qset->in_sw_list && !qset->remove)
286 asl_qset_insert_begin(whc, qset);
287 } else
288 usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
289
290 spin_unlock_irqrestore(&whc->lock, flags);
291
292 if (!err)
293 queue_work(whc->workqueue, &whc->async_work);
294
295 return err;
296 }
297
298 /**
299 * asl_urb_dequeue - remove an URB (qset) from the async list.
300 * @whc: the WHCI host controller
301 * @urb: the URB to dequeue
302 * @status: the current status of the URB
303 *
304 * URBs that do not yet have qTDs can simply be removed from the software
305 * queue, otherwise the qset must be removed from the ASL so the qTDs
306 * can be removed.
307 */
308 int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
309 {
310 struct whc_urb *wurb = urb->hcpriv;
311 struct whc_qset *qset = wurb->qset;
312 struct whc_std *std, *t;
313 bool has_qtd = false;
314 int ret;
315 unsigned long flags;
316
317 spin_lock_irqsave(&whc->lock, flags);
318
319 ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
320 if (ret < 0)
321 goto out;
322
323 list_for_each_entry_safe(std, t, &qset->stds, list_node) {
324 if (std->urb == urb) {
325 if (std->qtd)
326 has_qtd = true;
327 qset_free_std(whc, std);
328 } else
329 std->qtd = NULL; /* so this std is re-added when the qset is */
330 }
331
332 if (has_qtd) {
333 asl_qset_remove(whc, qset);
334 wurb->status = status;
335 wurb->is_async = true;
336 queue_work(whc->workqueue, &wurb->dequeue_work);
337 } else
338 qset_remove_urb(whc, qset, urb, status);
339 out:
340 spin_unlock_irqrestore(&whc->lock, flags);
341
342 return ret;
343 }
344
345 /**
346 * asl_qset_delete - delete a qset from the ASL
347 */
348 void asl_qset_delete(struct whc *whc, struct whc_qset *qset)
349 {
350 qset->remove = 1;
351 queue_work(whc->workqueue, &whc->async_work);
352 qset_delete(whc, qset);
353 }
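asl_qset_delete() only marks the qset and kicks the scan work; the blocking part is in qset_delete(), which is declared but not listed in this report. A hedged sketch of how the two ends of the removal handshake presumably meet, based on the remove_complete completion initialized in qset_clear() and signalled in qset_remove_complete() further down:

/*
 * Hedged sketch of the removal handshake (qset_delete()'s wait is an
 * assumption, not shown in this listing):
 *
 *   deleting context                        scan_async_work() worker
 *   ----------------                        ------------------------
 *   qset->remove = 1;
 *   queue_work(whc->workqueue,
 *              &whc->async_work);
 *   wait_for_completion(                    asl_qset_remove(whc, qset);
 *       &qset->remove_complete);            ...ASL update synced to hardware...
 *                                           qset_remove_complete(whc, qset);
 *                                             -> complete(&qset->remove_complete);
 */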
354
355 /**
356 * asl_init - initialize the asynchronous schedule list
357 *
358 * A dummy qset with no qTDs is added to the ASL to simplify removing
359 * qsets (no need to stop the ASL when the last qset is removed).
360 */
361 int asl_init(struct whc *whc)
362 {
363 struct whc_qset *qset;
364
365 qset = qset_alloc(whc, GFP_KERNEL);
366 if (qset == NULL)
367 return -ENOMEM;
368
369 asl_qset_insert_begin(whc, qset);
370 asl_qset_insert(whc, qset);
371
372 return 0;
373 }
374
375 /**
376 * asl_clean_up - free ASL resources
377 *
378 * The ASL is stopped and empty except for the dummy qset.
379 */
380 void asl_clean_up(struct whc *whc)
381 {
382 struct whc_qset *qset;
383
384 if (!list_empty(&whc->async_list)) {
385 qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
386 list_del(&qset->list_node);
387 qset_free(whc, qset);
388 }
389 }
1
2 /*
3 * Wireless Host Controller (WHC) driver.
4 *
5 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <linux/kernel.h>
20 #include <linux/init.h>
21 #include <linux/module.h>
22 #include <linux/uwb/umc.h>
23
24 #include "../../wusbcore/wusbhc.h"
25
26 #include "whcd.h"
27
28 /*
29 * One time initialization.
30 *
31 * Nothing to do here.
32 */
33 static int whc_reset(struct usb_hcd *usb_hcd)
34 {
35 return 0;
36 }
37
38 /*
39 * Start the wireless host controller.
40 *
41 * Start device notification.
42 *
43 * Put hc into run state, set DNTS parameters.
44 */
45 static int whc_start(struct usb_hcd *usb_hcd)
46 {
47 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
48 struct whc *whc = wusbhc_to_whc(wusbhc);
49 u8 bcid;
50 int ret;
51
52 mutex_lock(&wusbhc->mutex);
53
54 le_writel(WUSBINTR_GEN_CMD_DONE
55 | WUSBINTR_HOST_ERR
56 | WUSBINTR_ASYNC_SCHED_SYNCED
57 | WUSBINTR_DNTS_INT
58 | WUSBINTR_ERR_INT
59 | WUSBINTR_INT,
60 whc->base + WUSBINTR);
61
62 /* set cluster ID */
63 bcid = wusb_cluster_id_get();
64 ret = whc_set_cluster_id(whc, bcid);
65 if (ret < 0)
66 goto out;
67 wusbhc->cluster_id = bcid;
68
69 /* start HC */
70 whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN);
71
72 usb_hcd->uses_new_polling = 1;
73 set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
74 usb_hcd->state = HC_STATE_RUNNING;
75
76 out:
77 mutex_unlock(&wusbhc->mutex);
78 return ret;
79 }
80
81
82 /*
83 * Stop the wireless host controller.
84 *
85 * Stop device notification.
86 *
87 * Wait for pending transfer to stop? Put hc into stop state?
88 */
89 static void whc_stop(struct usb_hcd *usb_hcd)
90 {
91 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
92 struct whc *whc = wusbhc_to_whc(wusbhc);
93
94 mutex_lock(&wusbhc->mutex);
95
96 /* stop HC */
97 le_writel(0, whc->base + WUSBINTR);
98 whc_write_wusbcmd(whc, WUSBCMD_RUN, 0);
99 whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
100 WUSBSTS_HCHALTED, WUSBSTS_HCHALTED,
101 100, "HC to halt");
102
103 wusb_cluster_id_put(wusbhc->cluster_id);
104
105 mutex_unlock(&wusbhc->mutex);
106 }
107
108 static int whc_get_frame_number(struct usb_hcd *usb_hcd)
109 {
110 /* Frame numbers are not applicable to WUSB. */
111 return -ENOSYS;
112 }
113
114
115 /*
116 * Queue an URB to the ASL or PZL
117 */
118 static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
119 gfp_t mem_flags)
120 {
121 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
122 struct whc *whc = wusbhc_to_whc(wusbhc);
123 int ret;
124
125 switch (usb_pipetype(urb->pipe)) {
126 case PIPE_INTERRUPT:
127 ret = pzl_urb_enqueue(whc, urb, mem_flags);
128 break;
129 case PIPE_ISOCHRONOUS:
130 dev_err(&whc->umc->dev, "isochronous transfers unsupported\n");
131 ret = -ENOTSUPP;
132 break;
133 case PIPE_CONTROL:
134 case PIPE_BULK:
135 default:
136 ret = asl_urb_enqueue(whc, urb, mem_flags);
137 break;
138 }
139
140 return ret;
141 }
142
143 /*
144 * Remove a queued URB from the ASL or PZL.
145 */
146 static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)
147 {
148 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
149 struct whc *whc = wusbhc_to_whc(wusbhc);
150 int ret;
151
152 switch (usb_pipetype(urb->pipe)) {
153 case PIPE_INTERRUPT:
154 ret = pzl_urb_dequeue(whc, urb, status);
155 break;
156 case PIPE_ISOCHRONOUS:
157 ret = -ENOTSUPP;
158 break;
159 case PIPE_CONTROL:
160 case PIPE_BULK:
161 default:
162 ret = asl_urb_dequeue(whc, urb, status);
163 break;
164 }
165
166 return ret;
167 }
168
169 /*
170 * Wait for all URBs to the endpoint to be completed, then delete the
171 * qset.
172 */
173 static void whc_endpoint_disable(struct usb_hcd *usb_hcd,
174 struct usb_host_endpoint *ep)
175 {
176 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
177 struct whc *whc = wusbhc_to_whc(wusbhc);
178 struct whc_qset *qset;
179
180 qset = ep->hcpriv;
181 if (qset) {
182 ep->hcpriv = NULL;
183 if (usb_endpoint_xfer_bulk(&ep->desc)
184 || usb_endpoint_xfer_control(&ep->desc))
185 asl_qset_delete(whc, qset);
186 else
187 pzl_qset_delete(whc, qset);
188 }
189 }
190
191 static void whc_endpoint_reset(struct usb_hcd *usb_hcd,
192 struct usb_host_endpoint *ep)
193 {
194 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
195 struct whc *whc = wusbhc_to_whc(wusbhc);
196 struct whc_qset *qset;
197 unsigned long flags;
198
199 spin_lock_irqsave(&whc->lock, flags);
200
201 qset = ep->hcpriv;
202 if (qset) {
203 qset->remove = 1;
204 qset->reset = 1;
205
206 if (usb_endpoint_xfer_bulk(&ep->desc)
207 || usb_endpoint_xfer_control(&ep->desc))
208 queue_work(whc->workqueue, &whc->async_work);
209 else
210 queue_work(whc->workqueue, &whc->periodic_work);
211 }
212
213 spin_unlock_irqrestore(&whc->lock, flags);
214 }
215
216
217 static struct hc_driver whc_hc_driver = {
218 .description = "whci-hcd",
219 .product_desc = "Wireless host controller",
220 .hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd),
221 .irq = whc_int_handler,
222 .flags = HCD_USB2,
223
224 .reset = whc_reset,
225 .start = whc_start,
226 .stop = whc_stop,
227 .get_frame_number = whc_get_frame_number,
228 .urb_enqueue = whc_urb_enqueue,
229 .urb_dequeue = whc_urb_dequeue,
230 .endpoint_disable = whc_endpoint_disable,
231 .endpoint_reset = whc_endpoint_reset,
232
233 .hub_status_data = wusbhc_rh_status_data,
234 .hub_control = wusbhc_rh_control,
235 .start_port_reset = wusbhc_rh_start_port_reset,
236 };
237
238 static int whc_probe(struct umc_dev *umc)
239 {
240 int ret;
241 struct usb_hcd *usb_hcd;
242 struct wusbhc *wusbhc;
243 struct whc *whc;
244 struct device *dev = &umc->dev;
245
246 usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci");
247 if (usb_hcd == NULL) {
248 dev_err(dev, "unable to create hcd\n");
249 return -ENOMEM;
250 }
251
252 usb_hcd->wireless = 1;
253 usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */
254
255 wusbhc = usb_hcd_to_wusbhc(usb_hcd);
256 whc = wusbhc_to_whc(wusbhc);
257 whc->umc = umc;
258
259 ret = whc_init(whc);
260 if (ret)
261 goto error;
262
263 wusbhc->dev = dev;
264 wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent);
265 if (!wusbhc->uwb_rc) {
266 ret = -ENODEV;
267 dev_err(dev, "cannot get radio controller\n");
268 goto error;
269 }
270
271 if (whc->n_devices > USB_MAXCHILDREN) {
272 dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n",
273 whc->n_devices);
274 wusbhc->ports_max = USB_MAXCHILDREN;
275 } else
276 wusbhc->ports_max = whc->n_devices;
277 wusbhc->mmcies_max = whc->n_mmc_ies;
278 wusbhc->start = whc_wusbhc_start;
279 wusbhc->stop = whc_wusbhc_stop;
280 wusbhc->mmcie_add = whc_mmcie_add;
281 wusbhc->mmcie_rm = whc_mmcie_rm;
282 wusbhc->dev_info_set = whc_dev_info_set;
283 wusbhc->bwa_set = whc_bwa_set;
284 wusbhc->set_num_dnts = whc_set_num_dnts;
285 wusbhc->set_ptk = whc_set_ptk;
286 wusbhc->set_gtk = whc_set_gtk;
287
288 ret = wusbhc_create(wusbhc);
289 if (ret)
290 goto error_wusbhc_create;
291
292 ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED);
293 if (ret) {
294 dev_err(dev, "cannot add HCD: %d\n", ret);
295 goto error_usb_add_hcd;
296 }
297 device_wakeup_enable(usb_hcd->self.controller);
298
299 ret = wusbhc_b_create(wusbhc);
300 if (ret) {
301 dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret);
302 goto error_wusbhc_b_create;
303 }
304
305 whc_dbg_init(whc);
306
307 return 0;
308
309 error_wusbhc_b_create:
310 usb_remove_hcd(usb_hcd);
311 error_usb_add_hcd:
312 wusbhc_destroy(wusbhc);
313 error_wusbhc_create:
314 uwb_rc_put(wusbhc->uwb_rc);
315 error:
316 whc_clean_up(whc);
317 usb_put_hcd(usb_hcd);
318 return ret;
319 }
320
321
322 static void whc_remove(struct umc_dev *umc)
323 {
324 struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev);
325 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
326 struct whc *whc = wusbhc_to_whc(wusbhc);
327
328 if (usb_hcd) {
329 whc_dbg_clean_up(whc);
330 wusbhc_b_destroy(wusbhc);
331 usb_remove_hcd(usb_hcd);
332 wusbhc_destroy(wusbhc);
333 uwb_rc_put(wusbhc->uwb_rc);
334 whc_clean_up(whc);
335 usb_put_hcd(usb_hcd);
336 }
337 }
338
339 static struct umc_driver whci_hc_driver = {
340 .name = "whci-hcd",
341 .cap_id = UMC_CAP_ID_WHCI_WUSB_HC,
342 .probe = whc_probe,
343 .remove = whc_remove,
344 };
345
346 static int __init whci_hc_driver_init(void)
347 {
348 return umc_driver_register(&whci_hc_driver);
349 }
350 module_init(whci_hc_driver_init);
351
352 static void __exit whci_hc_driver_exit(void)
353 {
354 umc_driver_unregister(&whci_hc_driver);
355 }
356 module_exit(whci_hc_driver_exit);
357
358 /* PCI device IDs that we handle (so the module gets loaded) */
359 static struct pci_device_id __used whci_hcd_id_table[] = {
360 { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
361 { /* empty last entry */ }
362 };
363 MODULE_DEVICE_TABLE(pci, whci_hcd_id_table);
364
365 MODULE_DESCRIPTION("WHCI Wireless USB host controller driver");
366 MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
367 MODULE_LICENSE("GPL");
368
369
370
371
372
373 /* LDV_COMMENT_BEGIN_MAIN */
374 #ifdef LDV_MAIN2_sequence_infinite_withcheck_stateful
375
376 /*###########################################################################*/
377
378 /*############## Driver Environment Generator 0.2 output ####################*/
379
380 /*###########################################################################*/
381
382
383
384 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before it is unloaded. */
385 void ldv_check_final_state(void);
386
387 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
388 void ldv_check_return_value(int res);
389
390 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
391 void ldv_check_return_value_probe(int res);
392
393 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
394 void ldv_initialize(void);
395
396 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
397 void ldv_handler_precall(void);
398
399 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
400 int nondet_int(void);
401
402 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
403 int LDV_IN_INTERRUPT;
404
405 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
406 void ldv_main2_sequence_infinite_withcheck_stateful(void) {
407
408
409
410 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
411 /*============================= VARIABLE DECLARATION PART =============================*/
412 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/
413 /* content: static int whc_reset(struct usb_hcd *usb_hcd)*/
414 /* LDV_COMMENT_END_PREP */
415 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "whc_reset" */
416 struct usb_hcd * var_group1;
417 /* content: static int whc_start(struct usb_hcd *usb_hcd)*/
418 /* LDV_COMMENT_END_PREP */
419 /* content: static void whc_stop(struct usb_hcd *usb_hcd)*/
420 /* LDV_COMMENT_END_PREP */
421 /* content: static int whc_get_frame_number(struct usb_hcd *usb_hcd)*/
422 /* LDV_COMMENT_END_PREP */
423 /* content: static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, gfp_t mem_flags)*/
424 /* LDV_COMMENT_END_PREP */
425 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "whc_urb_enqueue" */
426 struct urb * var_group2;
427 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "whc_urb_enqueue" */
428 gfp_t var_whc_urb_enqueue_4_p2;
429 /* content: static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)*/
430 /* LDV_COMMENT_END_PREP */
431 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "whc_urb_dequeue" */
432 int var_whc_urb_dequeue_5_p2;
433 /* content: static void whc_endpoint_disable(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep)*/
434 /* LDV_COMMENT_END_PREP */
435 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "whc_endpoint_disable" */
436 struct usb_host_endpoint * var_group3;
437 /* content: static void whc_endpoint_reset(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep)*/
438 /* LDV_COMMENT_END_PREP */
439
440 /** STRUCT: struct type: umc_driver, struct name: whci_hc_driver **/
441 /* content: static int whc_probe(struct umc_dev *umc)*/
442 /* LDV_COMMENT_END_PREP */
443 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "whc_probe" */
444 struct umc_dev * var_group4;
445 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "whc_probe" */
446 static int res_whc_probe_8;
447 /* content: static void whc_remove(struct umc_dev *umc)*/
448 /* LDV_COMMENT_END_PREP */
449
450
451
452
453 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
454 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
455 /*============================= VARIABLE INITIALIZING PART =============================*/
456 LDV_IN_INTERRUPT=1;
457
458
459
460
461 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
462 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
463 /*============================= FUNCTION CALL SECTION =============================*/
464 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
465 ldv_initialize();
466
467 /** INIT: init_type: ST_MODULE_INIT **/
468 /* content: static int __init whci_hc_driver_init(void)*/
469 /* LDV_COMMENT_END_PREP */
470 /* LDV_COMMENT_FUNCTION_CALL Kernel calls the driver init function after the driver is loaded into the kernel. This function is declared as "MODULE_INIT(function name)". */
471 ldv_handler_precall();
472 if(whci_hc_driver_init())
473 goto ldv_final;
474
475
476 int ldv_s_whci_hc_driver_umc_driver = 0;
477
478
479 while( nondet_int()
480 || !(ldv_s_whci_hc_driver_umc_driver == 0)
481 ) {
482
483 switch(nondet_int()) {
484
485 case 0: {
486
487 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/
488
489
490 /* content: static int whc_reset(struct usb_hcd *usb_hcd)*/
491 /* LDV_COMMENT_END_PREP */
492 /* LDV_COMMENT_FUNCTION_CALL Function from field "reset" from driver structure with callbacks "whc_hc_driver" */
493 ldv_handler_precall();
494 whc_reset( var_group1);
495
496
497
498
499 }
500
501 break;
502 case 1: {
503
504 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/
505
506
507 /* content: static int whc_start(struct usb_hcd *usb_hcd)*/
508 /* LDV_COMMENT_END_PREP */
509 /* LDV_COMMENT_FUNCTION_CALL Function from field "start" from driver structure with callbacks "whc_hc_driver" */
510 ldv_handler_precall();
511 whc_start( var_group1);
512
513
514
515
516 }
517
518 break;
519 case 2: {
520
521 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/
522
523
524 /* content: static void whc_stop(struct usb_hcd *usb_hcd)*/
525 /* LDV_COMMENT_END_PREP */
526 /* LDV_COMMENT_FUNCTION_CALL Function from field "stop" from driver structure with callbacks "whc_hc_driver" */
527 ldv_handler_precall();
528 whc_stop( var_group1);
529
530
531
532
533 }
534
535 break;
536 case 3: {
537
538 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/
539
540
541 /* content: static int whc_get_frame_number(struct usb_hcd *usb_hcd)*/
542 /* LDV_COMMENT_END_PREP */
543 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_frame_number" from driver structure with callbacks "whc_hc_driver" */
544 ldv_handler_precall();
545 whc_get_frame_number( var_group1);
546
547
548
549
550 }
551
552 break;
553 case 4: {
554
555 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/
556
557
558 /* content: static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, gfp_t mem_flags)*/
559 /* LDV_COMMENT_END_PREP */
560 /* LDV_COMMENT_FUNCTION_CALL Function from field "urb_enqueue" from driver structure with callbacks "whc_hc_driver" */
561 ldv_handler_precall();
562 whc_urb_enqueue( var_group1, var_group2, var_whc_urb_enqueue_4_p2);
563
564
565
566
567 }
568
569 break;
570 case 5: {
571
572 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/
573
574
575 /* content: static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)*/
576 /* LDV_COMMENT_END_PREP */
577 /* LDV_COMMENT_FUNCTION_CALL Function from field "urb_dequeue" from driver structure with callbacks "whc_hc_driver" */
578 ldv_handler_precall();
579 whc_urb_dequeue( var_group1, var_group2, var_whc_urb_dequeue_5_p2);
580
581
582
583
584 }
585
586 break;
587 case 6: {
588
589 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/
590
591
592 /* content: static void whc_endpoint_disable(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep)*/
593 /* LDV_COMMENT_END_PREP */
594 /* LDV_COMMENT_FUNCTION_CALL Function from field "endpoint_disable" from driver structure with callbacks "whc_hc_driver" */
595 ldv_handler_precall();
596 whc_endpoint_disable( var_group1, var_group3);
597
598
599
600
601 }
602
603 break;
604 case 7: {
605
606 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/
607
608
609 /* content: static void whc_endpoint_reset(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep)*/
610 /* LDV_COMMENT_END_PREP */
611 /* LDV_COMMENT_FUNCTION_CALL Function from field "endpoint_reset" from driver structure with callbacks "whc_hc_driver" */
612 ldv_handler_precall();
613 whc_endpoint_reset( var_group1, var_group3);
614
615
616
617
618 }
619
620 break;
621 case 8: {
622
623 /** STRUCT: struct type: umc_driver, struct name: whci_hc_driver **/
624 if(ldv_s_whci_hc_driver_umc_driver==0) {
625
626 /* content: static int whc_probe(struct umc_dev *umc)*/
627 /* LDV_COMMENT_END_PREP */
628 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "whci_hc_driver". Standard function test for correct return result. */
629 res_whc_probe_8 = whc_probe( var_group4);
630 ldv_check_return_value(res_whc_probe_8);
631 ldv_check_return_value_probe(res_whc_probe_8);
632 if(res_whc_probe_8)
633 goto ldv_module_exit;
634 ldv_s_whci_hc_driver_umc_driver++;
635
636 }
637
638 }
639
640 break;
641 case 9: {
642
643 /** STRUCT: struct type: umc_driver, struct name: whci_hc_driver **/
644 if(ldv_s_whci_hc_driver_umc_driver==1) {
645
646 /* content: static void whc_remove(struct umc_dev *umc)*/
647 /* LDV_COMMENT_END_PREP */
648 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "whci_hc_driver" */
649 ldv_handler_precall();
650 whc_remove( var_group4);
651 ldv_s_whci_hc_driver_umc_driver=0;
652
653 }
654
655 }
656
657 break;
658 default: break;
659
660 }
661
662 }
663
664 ldv_module_exit:
665
666 /** INIT: init_type: ST_MODULE_EXIT **/
667 /* content: static void __exit whci_hc_driver_exit(void)*/
668 /* LDV_COMMENT_END_PREP */
669 /* LDV_COMMENT_FUNCTION_CALL Kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */
670 ldv_handler_precall();
671 whci_hc_driver_exit();
672
673 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
674 ldv_final: ldv_check_final_state();
675
676 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
677 return;
678
679 }
680 #endif
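Error #127 is reported against the DMA-mapping rule: the trace at the top of this report shows ldv_dma_map_single_attrs() mapping a qset page list and bumping LDV_DMA_MAP_CALLS, and the trace then reaches a second mapping with no dma_mapping_error() check visible in between. The rule model itself is not reproduced in this report; the following is a hedged reconstruction built only from the names declared above (LDV_DMA_MAP_CALLS, ldv_error(), ldv_check_final_state(), ldv_undef_ptr(), ldv_undef_int()):

/* Hedged reconstruction of the DMA-mapping rule model -- for illustration,
 * not the actual LDV model file. */
int LDV_DMA_MAP_CALLS = 0;

dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
                              enum dma_data_direction dir)
{
        /* A new mapping may not be made while a previous one is still
         * unchecked on this path. */
        if (LDV_DMA_MAP_CALLS != 0)
                ldv_error();
        LDV_DMA_MAP_CALLS++;
        return (dma_addr_t)ldv_undef_ptr();
}

int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        /* Checking the mapping discharges the obligation. */
        if (LDV_DMA_MAP_CALLS == 0)
                ldv_error();
        LDV_DMA_MAP_CALLS--;
        return ldv_undef_int();
}

void ldv_check_final_state(void)
{
        /* No unchecked mapping may remain when the module unloads. */
        if (LDV_DMA_MAP_CALLS != 0)
                ldv_error();
}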
681
682 /* LDV_COMMENT_END_MAIN */
1 /*
2 * Wireless Host Controller (WHC) qset management.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18 #include <linux/kernel.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/slab.h>
21 #include <linux/uwb/umc.h>
22 #include <linux/usb.h>
23
24 #include "../../wusbcore/wusbhc.h"
25
26 #include "whcd.h"
27
28 struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
29 {
30 struct whc_qset *qset;
31 dma_addr_t dma;
32
33 qset = dma_pool_zalloc(whc->qset_pool, mem_flags, &dma);
34 if (qset == NULL)
35 return NULL;
36
37 qset->qset_dma = dma;
38 qset->whc = whc;
39
40 INIT_LIST_HEAD(&qset->list_node);
41 INIT_LIST_HEAD(&qset->stds);
42
43 return qset;
44 }
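qset_alloc() draws qsets from whc->qset_pool; creating that DMA pool is not part of this listing (it presumably happens in whc_init(), declared earlier in the report). A minimal sketch of the assumed pool life cycle, with illustrative size and alignment values:

/* Assumed pool setup and tear-down, shown for illustration only. */
whc->qset_pool = dma_pool_create("qset", dev,
                                 sizeof(struct whc_qset),   /* object size */
                                 64,                         /* alignment: assumed value */
                                 0);                         /* no boundary restriction */
if (whc->qset_pool == NULL)
        return -ENOMEM;

/* ...later, once every qset has been returned with dma_pool_free() (see
 * qset_free(), declared above but not listed here): */
dma_pool_destroy(whc->qset_pool);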
45
46 /**
47 * qset_fill_qh - fill the static endpoint state in a qset's QHead
48 * @qset: the qset whose QH needs initializing with static endpoint
49 * state
50 * @urb: an urb for a transfer to this endpoint
51 */
52 static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
53 {
54 struct usb_device *usb_dev = urb->dev;
55 struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
56 struct usb_wireless_ep_comp_descriptor *epcd;
57 bool is_out;
58 uint8_t phy_rate;
59
60 is_out = usb_pipeout(urb->pipe);
61
62 qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);
63
64 epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
65 if (epcd) {
66 qset->max_seq = epcd->bMaxSequence;
67 qset->max_burst = epcd->bMaxBurst;
68 } else {
69 qset->max_seq = 2;
70 qset->max_burst = 1;
71 }
72
73 /*
74 * Initial PHY rate is 53.3 Mbit/s for control endpoints or
75 * the maximum supported by the device for other endpoints
76 * (unless limited by the user).
77 */
78 if (usb_pipecontrol(urb->pipe))
79 phy_rate = UWB_PHY_RATE_53;
80 else {
81 uint16_t phy_rates;
82
83 phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
84 phy_rate = fls(phy_rates) - 1;
85 if (phy_rate > whc->wusbhc.phy_rate)
86 phy_rate = whc->wusbhc.phy_rate;
87 }
88
89 qset->qh.info1 = cpu_to_le32(
90 QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
91 | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
92 | usb_pipe_to_qh_type(urb->pipe)
93 | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
94 | QH_INFO1_MAX_PKT_LEN(qset->max_packet)
95 );
96 qset->qh.info2 = cpu_to_le32(
97 QH_INFO2_BURST(qset->max_burst)
98 | QH_INFO2_DBP(0)
99 | QH_INFO2_MAX_COUNT(3)
100 | QH_INFO2_MAX_RETRY(3)
101 | QH_INFO2_MAX_SEQ(qset->max_seq - 1)
102 );
103 /* FIXME: where can we obtain these Tx parameters from? Why
104 * doesn't the chip know what Tx power to use? It knows the Rx
105 * strength and can presumably guess the Tx power required
106 * from that? */
107 qset->qh.info3 = cpu_to_le32(
108 QH_INFO3_TX_RATE(phy_rate)
109 | QH_INFO3_TX_PWR(0) /* 0 == max power */
110 );
111
112 qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
113 }
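The PHY-rate selection above takes the highest bit set in the device's advertised rate bitmap and then caps it at the host's configured limit. A worked example with hypothetical values:

/* Worked example (hypothetical values):
 *   wPHYRates == 0x001F          -> rate indices 0..4 advertised
 *   fls(0x001F) == 5             -> phy_rate = 5 - 1 = 4
 *   whc->wusbhc.phy_rate == 3    -> final phy_rate clamped down to 3
 */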
114
115 /**
116 * qset_clear - clear fields in a qset so it may be reinserted into a
117 * schedule.
118 *
119 * The sequence number and current window are not cleared (see
120 * qset_reset()).
121 */
122 void qset_clear(struct whc *whc, struct whc_qset *qset)
123 {
124 qset->td_start = qset->td_end = qset->ntds = 0;
125
126 qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
127 qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
128 qset->qh.err_count = 0;
129 qset->qh.scratch[0] = 0;
130 qset->qh.scratch[1] = 0;
131 qset->qh.scratch[2] = 0;
132
133 memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));
134
135 init_completion(&qset->remove_complete);
136 }
137
138 /**
139 * qset_reset - reset endpoint state in a qset.
140 *
141 * Clears the sequence number and current window. This qset must not
142 * be in the ASL or PZL.
143 */
144 void qset_reset(struct whc *whc, struct whc_qset *qset)
145 {
146 qset->reset = 0;
147
148 qset->qh.status &= ~QH_STATUS_SEQ_MASK;
149 qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
150 }
151
152 /**
153 * get_qset - get the qset for an async endpoint
154 *
155 * A new qset is created if one does not already exist.
156 */
157 struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
158 gfp_t mem_flags)
159 {
160 struct whc_qset *qset;
161
162 qset = urb->ep->hcpriv;
163 if (qset == NULL) {
164 qset = qset_alloc(whc, mem_flags);
165 if (qset == NULL)
166 return NULL;
167
168 qset->ep = urb->ep;
169 urb->ep->hcpriv = qset;
170 qset_fill_qh(whc, qset, urb);
171 }
172 return qset;
173 }
174
175 void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
176 {
177 qset->remove = 0;
178 list_del_init(&qset->list_node);
179 complete(&qset->remove_complete);
180 }
181
182 /**
183 * qset_add_qtds - add qTDs for an URB to a qset
184 *
185 * Returns true if the list (ASL/PZL) must be updated because (for a
186 * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
187 */
188 enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
189 {
190 struct whc_std *std;
191 enum whc_update update = 0;
192
193 list_for_each_entry(std, &qset->stds, list_node) {
194 struct whc_qtd *qtd;
195 uint32_t status;
196
197 if (qset->ntds >= WHCI_QSET_TD_MAX
198 || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
199 break;
200
201 if (std->qtd)
202 continue; /* already has a qTD */
203
204 qtd = std->qtd = &qset->qtd[qset->td_end];
205
206 /* Fill in setup bytes for control transfers. */
207 if (usb_pipecontrol(std->urb->pipe))
208 memcpy(qtd->setup, std->urb->setup_packet, 8);
209
210 status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);
211
212 if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
213 status |= QTD_STS_LAST_PKT;
214
215 /*
216 * For an IN transfer the iAlt field should be set so
217 * the h/w will automatically advance to the next
218 * transfer. However, if there are 8 or more TDs
219 * remaining in this transfer then iAlt cannot be set
220 * as it could point to somewhere in this transfer.
221 */
222 if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
223 int ialt;
224 ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
225 status |= QTD_STS_IALT(ialt);
226 } else if (usb_pipein(std->urb->pipe))
227 qset->pause_after_urb = std->urb;
228
229 if (std->num_pointers)
230 qtd->options = cpu_to_le32(QTD_OPT_IOC);
231 else
232 qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
233 qtd->page_list_ptr = cpu_to_le64(std->dma_addr);
234
235 qtd->status = cpu_to_le32(status);
236
237 if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
238 update = WHC_UPDATE_UPDATED;
239
240 if (++qset->td_end >= WHCI_QSET_TD_MAX)
241 qset->td_end = 0;
242 qset->ntds++;
243 }
244
245 return update;
246 }
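The iAlt value computed in the loop above is modular arithmetic over the qTD ring. A worked example, assuming WHCI_QSET_TD_MAX is 8 (which is what the "8 or more TDs" comment implies):

/* Worked example (assuming WHCI_QSET_TD_MAX == 8):
 *   td_end == 6, std->ntds_remaining == 3
 *   ialt = (6 + 3) % 8 == 1
 * i.e. the ring slot just past this transfer's last qTD, so on a short
 * read the hardware advances straight to the next transfer. */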
247
248 /**
249 * qset_remove_qtd - remove the first qTD from a qset.
250 *
251 * The qTD might still be active (if it's part of an IN URB that
252 * resulted in a short read) so ensure it's deactivated.
253 */
254 static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
255 {
256 qset->qtd[qset->td_start].status = 0;
257
258 if (++qset->td_start >= WHCI_QSET_TD_MAX)
259 qset->td_start = 0;
260 qset->ntds--;
261 }
262
263 static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
264 {
265 struct scatterlist *sg;
266 void *bounce;
267 size_t remaining, offset;
268
269 bounce = std->bounce_buf;
270 remaining = std->len;
271
272 sg = std->bounce_sg;
273 offset = std->bounce_offset;
274
275 while (remaining) {
276 size_t len;
277
278 len = min(sg->length - offset, remaining);
279 memcpy(sg_virt(sg) + offset, bounce, len);
280
281 bounce += len;
282 remaining -= len;
283
284 offset += len;
285 if (offset >= sg->length) {
286 sg = sg_next(sg);
287 offset = 0;
288 }
289 }
290
291 }
292
293 /**
294 * qset_free_std - remove an sTD and free it.
295 * @whc: the WHCI host controller
296 * @std: the sTD to remove and free.
297 */
298 void qset_free_std(struct whc *whc, struct whc_std *std)
299 {
300 list_del(&std->list_node);
301 if (std->bounce_buf) {
302 bool is_out = usb_pipeout(std->urb->pipe);
303 dma_addr_t dma_addr;
304
305 if (std->num_pointers)
306 dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
307 else
308 dma_addr = std->dma_addr;
309
310 dma_unmap_single(whc->wusbhc.dev, dma_addr,
311 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
312 if (!is_out)
313 qset_copy_bounce_to_sg(whc, std);
314 kfree(std->bounce_buf);
315 }
316 if (std->pl_virt) {
317 if (std->dma_addr)
318 dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
319 std->num_pointers * sizeof(struct whc_page_list_entry),
320 DMA_TO_DEVICE);
321 kfree(std->pl_virt);
322 std->pl_virt = NULL;
323 }
324 kfree(std);
325 }
326
327 /**
328 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
329 */
330 static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
331 struct urb *urb)
332 {
333 struct whc_std *std, *t;
334
335 list_for_each_entry_safe(std, t, &qset->stds, list_node) {
336 if (std->urb != urb)
337 break;
338 if (std->qtd != NULL)
339 qset_remove_qtd(whc, qset);
340 qset_free_std(whc, std);
341 }
342 }
343
344 /**
345 * qset_free_stds - free any remaining sTDs for an URB.
346 */
347 static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
348 {
349 struct whc_std *std, *t;
350
351 list_for_each_entry_safe(std, t, &qset->stds, list_node) {
352 if (std->urb == urb)
353 qset_free_std(qset->whc, std);
354 }
355 }
356
357 static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
358 {
359 dma_addr_t dma_addr = std->dma_addr;
360 dma_addr_t sp, ep;
361 size_t pl_len;
362 int p;
363
364 /* Short buffers don't need a page list. */
365 if (std->len <= WHCI_PAGE_SIZE) {
366 std->num_pointers = 0;
367 return 0;
368 }
369
370 sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
371 ep = dma_addr + std->len;
372 std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
373
374 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
375 std->pl_virt = kmalloc(pl_len, mem_flags);
376 if (std->pl_virt == NULL)
377 return -ENOMEM;
378 std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
379 if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
380 kfree(std->pl_virt);
381 return -EFAULT;
382 }
383
384 for (p = 0; p < std->num_pointers; p++) {
385 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
386 dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
387 }
388
389 return 0;
390 }
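/*
 * Editor's note: a small worked example of the page-list sizing above,
 * assuming WHCI_PAGE_SIZE is 4096 (the macro is defined elsewhere in the
 * driver, so the value is an assumption here). For a 6000-byte buffer whose
 * DMA address starts 0x200 bytes into a page:
 *
 *	sp = dma_addr & ~4095;                     start of the first page
 *	ep = dma_addr + 6000;                      one past the last byte
 *	num_pointers = DIV_ROUND_UP(ep - sp, 4096)
 *	             = DIV_ROUND_UP(0x200 + 6000, 4096) = 2 page pointers
 */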
391
392 /**
393 * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system.
394 */
395 static void urb_dequeue_work(struct work_struct *work)
396 {
397 struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
398 struct whc_qset *qset = wurb->qset;
399 struct whc *whc = qset->whc;
400 unsigned long flags;
401
402 if (wurb->is_async)
403 asl_update(whc, WUSBCMD_ASYNC_UPDATED
404 | WUSBCMD_ASYNC_SYNCED_DB
405 | WUSBCMD_ASYNC_QSET_RM);
406 else
407 pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
408 | WUSBCMD_PERIODIC_SYNCED_DB
409 | WUSBCMD_PERIODIC_QSET_RM);
410
411 spin_lock_irqsave(&whc->lock, flags);
412 qset_remove_urb(whc, qset, wurb->urb, wurb->status);
413 spin_unlock_irqrestore(&whc->lock, flags);
414 }
415
416 static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
417 struct urb *urb, gfp_t mem_flags)
418 {
419 struct whc_std *std;
420
421 std = kzalloc(sizeof(struct whc_std), mem_flags);
422 if (std == NULL)
423 return NULL;
424
425 std->urb = urb;
426 std->qtd = NULL;
427
428 INIT_LIST_HEAD(&std->list_node);
429 list_add_tail(&std->list_node, &qset->stds);
430
431 return std;
432 }
433
434 static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
435 gfp_t mem_flags)
436 {
437 size_t remaining;
438 struct scatterlist *sg;
439 int i;
440 int ntds = 0;
441 struct whc_std *std = NULL;
442 struct whc_page_list_entry *new_pl_virt;
443 dma_addr_t prev_end = 0;
444 size_t pl_len;
445 int p = 0;
446
447 remaining = urb->transfer_buffer_length;
448
449 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
450 dma_addr_t dma_addr;
451 size_t dma_remaining;
452 dma_addr_t sp, ep;
453 int num_pointers;
454
455 if (remaining == 0) {
456 break;
457 }
458
459 dma_addr = sg_dma_address(sg);
460 dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);
461
462 while (dma_remaining) {
463 size_t dma_len;
464
465 /*
466 * We can use the previous std (if it exists) provided that:
467 * - the previous one ended on a page boundary.
468 * - the current one begins on a page boundary.
469 * - the previous one isn't full.
470 *
471 * If a new std is needed but the previous one
472 * was not a whole number of packets then this
473 * sg list cannot be mapped onto multiple
474 * qTDs. Return an error and let the caller
475 * sort it out.
476 */
477 if (!std
478 || (prev_end & (WHCI_PAGE_SIZE-1))
479 || (dma_addr & (WHCI_PAGE_SIZE-1))
480 || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
481 if (std && std->len % qset->max_packet != 0)
482 return -EINVAL;
483 std = qset_new_std(whc, qset, urb, mem_flags);
484 if (std == NULL) {
485 return -ENOMEM;
486 }
487 ntds++;
488 p = 0;
489 }
490
491 dma_len = dma_remaining;
492
493 /*
494 * If the remainder of this element doesn't
495 * fit in a single qTD, limit the qTD to a
496 * whole number of packets. This allows the
497 * remainder to go into the next qTD.
498 */
499 if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
500 dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
501 * qset->max_packet - std->len;
502 }
503
504 std->len += dma_len;
505 std->ntds_remaining = -1; /* filled in later */
506
507 sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
508 ep = dma_addr + dma_len;
509 num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
510 std->num_pointers += num_pointers;
511
512 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
513
514 new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
515 if (new_pl_virt == NULL) {
516 kfree(std->pl_virt);
517 std->pl_virt = NULL;
518 return -ENOMEM;
519 }
520 std->pl_virt = new_pl_virt;
521
522 for (;p < std->num_pointers; p++) {
523 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
524 dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
525 }
526
527 prev_end = dma_addr = ep;
528 dma_remaining -= dma_len;
529 remaining -= dma_len;
530 }
531 }
532
533 /* Now the number of sTDs is known, go back and fill in
534 std->ntds_remaining. */
535 list_for_each_entry(std, &qset->stds, list_node) {
536 if (std->ntds_remaining == -1) {
537 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
538 std->ntds_remaining = ntds--;
539 std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
540 pl_len, DMA_TO_DEVICE);
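/*
 * Editor's note: this dma_map_single() result is not checked with
 * dma_mapping_error() before the function returns, which appears to be what
 * the 331_1a rule (modelled further below) flags. A minimal sketch of the
 * kind of check the rule expects; the -ENOMEM error code is an assumption,
 * not taken from the driver:
 *
 *	if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
 *		return -ENOMEM;
 */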
541 }
542 }
543 return 0;
544 }
545
546 /**
547 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
548 *
549 * If the URB contains an sg list whose elements cannot be directly
550 * mapped to qTDs then the data must be transferred via bounce
551 * buffers.
552 */
553 static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
554 struct urb *urb, gfp_t mem_flags)
555 {
556 bool is_out = usb_pipeout(urb->pipe);
557 size_t max_std_len;
558 size_t remaining;
559 int ntds = 0;
560 struct whc_std *std = NULL;
561 void *bounce = NULL;
562 struct scatterlist *sg;
563 int i;
564
565 /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */
566 max_std_len = qset->max_burst * qset->max_packet;
567
568 remaining = urb->transfer_buffer_length;
569
570 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
571 size_t len;
572 size_t sg_remaining;
573 void *orig;
574
575 if (remaining == 0) {
576 break;
577 }
578
579 sg_remaining = min_t(size_t, remaining, sg->length);
580 orig = sg_virt(sg);
581
582 while (sg_remaining) {
583 if (!std || std->len == max_std_len) {
584 std = qset_new_std(whc, qset, urb, mem_flags);
585 if (std == NULL)
586 return -ENOMEM;
587 std->bounce_buf = kmalloc(max_std_len, mem_flags);
588 if (std->bounce_buf == NULL)
589 return -ENOMEM;
590 std->bounce_sg = sg;
591 std->bounce_offset = orig - sg_virt(sg);
592 bounce = std->bounce_buf;
593 ntds++;
594 }
595
596 len = min(sg_remaining, max_std_len - std->len);
597
598 if (is_out)
599 memcpy(bounce, orig, len);
600
601 std->len += len;
602 std->ntds_remaining = -1; /* filled in later */
603
604 bounce += len;
605 orig += len;
606 sg_remaining -= len;
607 remaining -= len;
608 }
609 }
610
611 /*
612 * For each of the new sTDs, map the bounce buffers, create
613 * page lists (if necessary), and fill in std->ntds_remaining.
614 */
615 list_for_each_entry(std, &qset->stds, list_node) {
616 if (std->ntds_remaining != -1)
617 continue;
618
619 std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
620 is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
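/*
 * Editor's note: as with the page-list mapping in qset_add_urb_sg() above,
 * this dma_map_single() of the bounce buffer is used without a
 * dma_mapping_error() check.
 */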
621
622 if (qset_fill_page_list(whc, std, mem_flags) < 0)
623 return -ENOMEM;
624
625 std->ntds_remaining = ntds--;
626 }
627
628 return 0;
629 }
630
631 /**
632 * qset_add_urb - add an urb to the qset's queue.
633 *
634 * The URB is chopped into sTDs, one for each qTD that will be required.
635 * At least one qTD (and sTD) is required even if the transfer has no
636 * data (e.g., for some control transfers).
637 */
638 int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
639 gfp_t mem_flags)
640 {
641 struct whc_urb *wurb;
642 int remaining = urb->transfer_buffer_length;
643 u64 transfer_dma = urb->transfer_dma;
644 int ntds_remaining;
645 int ret;
646
647 wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
648 if (wurb == NULL)
649 goto err_no_mem;
650 urb->hcpriv = wurb;
651 wurb->qset = qset;
652 wurb->urb = urb;
653 INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
654
655 if (urb->num_sgs) {
656 ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
657 if (ret == -EINVAL) {
658 qset_free_stds(qset, urb);
659 ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
660 }
661 if (ret < 0)
662 goto err_no_mem;
663 return 0;
664 }
665
666 ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
667 if (ntds_remaining == 0)
668 ntds_remaining = 1;
669
670 while (ntds_remaining) {
671 struct whc_std *std;
672 size_t std_len;
673
674 std_len = remaining;
675 if (std_len > QTD_MAX_XFER_SIZE)
676 std_len = QTD_MAX_XFER_SIZE;
677
678 std = qset_new_std(whc, qset, urb, mem_flags);
679 if (std == NULL)
680 goto err_no_mem;
681
682 std->dma_addr = transfer_dma;
683 std->len = std_len;
684 std->ntds_remaining = ntds_remaining;
685
686 if (qset_fill_page_list(whc, std, mem_flags) < 0)
687 goto err_no_mem;
688
689 ntds_remaining--;
690 remaining -= std_len;
691 transfer_dma += std_len;
692 }
693
694 return 0;
695
696 err_no_mem:
697 qset_free_stds(qset, urb);
698 return -ENOMEM;
699 }
700
701 /**
702 * qset_remove_urb - remove an URB from the urb queue.
703 *
704 * The URB is returned to the USB subsystem.
705 */
706 void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
707 struct urb *urb, int status)
708 {
709 struct wusbhc *wusbhc = &whc->wusbhc;
710 struct whc_urb *wurb = urb->hcpriv;
711
712 usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
713 /* Drop the lock as urb->complete() may enqueue another urb. */
714 spin_unlock(&whc->lock);
715 wusbhc_giveback_urb(wusbhc, urb, status);
716 spin_lock(&whc->lock);
717
718 kfree(wurb);
719 }
720
721 /**
722 * get_urb_status_from_qtd - get the completed urb status from qTD status
723 * @urb: completed urb
724 * @status: qTD status
725 */
726 static int get_urb_status_from_qtd(struct urb *urb, u32 status)
727 {
728 if (status & QTD_STS_HALTED) {
729 if (status & QTD_STS_DBE)
730 return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
731 else if (status & QTD_STS_BABBLE)
732 return -EOVERFLOW;
733 else if (status & QTD_STS_RCE)
734 return -ETIME;
735 return -EPIPE;
736 }
737 if (usb_pipein(urb->pipe)
738 && (urb->transfer_flags & URB_SHORT_NOT_OK)
739 && urb->actual_length < urb->transfer_buffer_length)
740 return -EREMOTEIO;
741 return 0;
742 }
743
744 /**
745 * process_inactive_qtd - process an inactive (but not halted) qTD.
746 *
747 * Update the urb with the transfer bytes from the qTD. If the urb is
748 * completely transferred or (in the case of an IN only) the LPF is
749 * set, then the transfer is complete and the urb should be returned
750 * to the system.
751 */
752 void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
753 struct whc_qtd *qtd)
754 {
755 struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
756 struct urb *urb = std->urb;
757 uint32_t status;
758 bool complete;
759
760 status = le32_to_cpu(qtd->status);
761
762 urb->actual_length += std->len - QTD_STS_TO_LEN(status);
763
764 if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
765 complete = true;
766 else
767 complete = whc_std_last(std);
768
769 qset_remove_qtd(whc, qset);
770 qset_free_std(whc, std);
771
772 /*
773 * Transfers for this URB are complete? Then return it to the
774 * USB subsystem.
775 */
776 if (complete) {
777 qset_remove_qtds(whc, qset, urb);
778 qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));
779
780 /*
781 * If iAlt isn't valid then the hardware didn't
782 * advance iCur. Adjust the start and end pointers to
783 * match iCur.
784 */
785 if (!(status & QTD_STS_IALT_VALID))
786 qset->td_start = qset->td_end
787 = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
788 qset->pause_after_urb = NULL;
789 }
790 }
791
792 /**
793 * process_halted_qtd - process a qset with a halted qtd
794 *
795 * Remove all the qTDs for the failed URB and return the failed URB to
796 * the USB subsystem. Then remove all other qTDs so the qset can be
797 * removed.
798 *
799 * FIXME: this is the point where rate adaptation can be done. If a
800 * transfer failed because it exceeded the maximum number of retries
801 * then it could be reactivated with a slower rate without having to
802 * remove the qset.
803 */
804 void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
805 struct whc_qtd *qtd)
806 {
807 struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
808 struct urb *urb = std->urb;
809 int urb_status;
810
811 urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));
812
813 qset_remove_qtds(whc, qset, urb);
814 qset_remove_urb(whc, qset, urb, urb_status);
815
816 list_for_each_entry(std, &qset->stds, list_node) {
817 if (qset->ntds == 0)
818 break;
819 qset_remove_qtd(whc, qset);
820 std->qtd = NULL;
821 }
822
823 qset->remove = 1;
824 }
825
826 void qset_free(struct whc *whc, struct whc_qset *qset)
827 {
828 dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
829 }
830
831 /**
832 * qset_delete - wait for a qset to be unused, then free it.
833 */
834 void qset_delete(struct whc *whc, struct whc_qset *qset)
835 {
836 wait_for_completion(&qset->remove_complete);
837 qset_free(whc, qset);
838 }
1
2 #include <linux/types.h>
3 #include <linux/dma-direction.h>
4
5 extern dma_addr_t ldv_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir);
6 extern dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir);
7 extern dma_addr_t ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
8 extern int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
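/*
 * Editor's note: these ldv_* declarations belong to the verifier's model of
 * the DMA API; in the instrumented source the driver's dma_map_single(),
 * dma_map_page() and dma_mapping_error() calls are presumably redirected to
 * them so the checker can track mappings that are never checked (the model
 * bodies appear further below).
 */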
9 #line 1 "/home/cluser/ldv/ref_launch/work/current--X--drivers--X--defaultlinux-4.5-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.5-rc1.tar.xz/csd_deg_dscv/2256/dscv_tempdir/dscv/ri/331_1a/drivers/usb/host/whci/qset.c"
10 /*
11 * Wireless Host Controller (WHC) qset management.
12 *
13 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License version
17 * 2 as published by the Free Software Foundation.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program. If not, see <http://www.gnu.org/licenses/>.
26 */
27 #include <linux/kernel.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/slab.h>
30 #include <linux/uwb/umc.h>
31 #include <linux/usb.h>
32
33 #include "../../wusbcore/wusbhc.h"
34
35 #include "whcd.h"
36
37 struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
38 {
39 struct whc_qset *qset;
40 dma_addr_t dma;
41
42 qset = dma_pool_zalloc(whc->qset_pool, mem_flags, &dma);
43 if (qset == NULL)
44 return NULL;
45
46 qset->qset_dma = dma;
47 qset->whc = whc;
48
49 INIT_LIST_HEAD(&qset->list_node);
50 INIT_LIST_HEAD(&qset->stds);
51
52 return qset;
53 }
54
55 /**
56 * qset_fill_qh - fill the static endpoint state in a qset's QHead
57 * @qset: the qset whose QH needs initializing with static endpoint
58 * state
59 * @urb: an urb for a transfer to this endpoint
60 */
61 static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
62 {
63 struct usb_device *usb_dev = urb->dev;
64 struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
65 struct usb_wireless_ep_comp_descriptor *epcd;
66 bool is_out;
67 uint8_t phy_rate;
68
69 is_out = usb_pipeout(urb->pipe);
70
71 qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);
72
73 epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
74 if (epcd) {
75 qset->max_seq = epcd->bMaxSequence;
76 qset->max_burst = epcd->bMaxBurst;
77 } else {
78 qset->max_seq = 2;
79 qset->max_burst = 1;
80 }
81
82 /*
83 * Initial PHY rate is 53.3 Mbit/s for control endpoints or
84 * the maximum supported by the device for other endpoints
85 * (unless limited by the user).
86 */
87 if (usb_pipecontrol(urb->pipe))
88 phy_rate = UWB_PHY_RATE_53;
89 else {
90 uint16_t phy_rates;
91
92 phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
93 phy_rate = fls(phy_rates) - 1;
94 if (phy_rate > whc->wusbhc.phy_rate)
95 phy_rate = whc->wusbhc.phy_rate;
96 }
97
98 qset->qh.info1 = cpu_to_le32(
99 QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
100 | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
101 | usb_pipe_to_qh_type(urb->pipe)
102 | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
103 | QH_INFO1_MAX_PKT_LEN(qset->max_packet)
104 );
105 qset->qh.info2 = cpu_to_le32(
106 QH_INFO2_BURST(qset->max_burst)
107 | QH_INFO2_DBP(0)
108 | QH_INFO2_MAX_COUNT(3)
109 | QH_INFO2_MAX_RETRY(3)
110 | QH_INFO2_MAX_SEQ(qset->max_seq - 1)
111 );
112 /* FIXME: where can we obtain these Tx parameters from? Why
113 * doesn't the chip know what Tx power to use? It knows the Rx
114 * strength and can presumably guess the Tx power required
115 * from that? */
116 qset->qh.info3 = cpu_to_le32(
117 QH_INFO3_TX_RATE(phy_rate)
118 | QH_INFO3_TX_PWR(0) /* 0 == max power */
119 );
120
121 qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
122 }
123
124 /**
125 * qset_clear - clear fields in a qset so it may be reinserted into a
126 * schedule.
127 *
128 * The sequence number and current window are not cleared (see
129 * qset_reset()).
130 */
131 void qset_clear(struct whc *whc, struct whc_qset *qset)
132 {
133 qset->td_start = qset->td_end = qset->ntds = 0;
134
135 qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
136 qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
137 qset->qh.err_count = 0;
138 qset->qh.scratch[0] = 0;
139 qset->qh.scratch[1] = 0;
140 qset->qh.scratch[2] = 0;
141
142 memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));
143
144 init_completion(&qset->remove_complete);
145 }
146
147 /**
148 * qset_reset - reset endpoint state in a qset.
149 *
150 * Clears the sequence number and current window. This qset must not
151 * be in the ASL or PZL.
152 */
153 void qset_reset(struct whc *whc, struct whc_qset *qset)
154 {
155 qset->reset = 0;
156
157 qset->qh.status &= ~QH_STATUS_SEQ_MASK;
158 qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
159 }
160
161 /**
162 * get_qset - get the qset for an async endpoint
163 *
164 * A new qset is created if one does not already exist.
165 */
166 struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
167 gfp_t mem_flags)
168 {
169 struct whc_qset *qset;
170
171 qset = urb->ep->hcpriv;
172 if (qset == NULL) {
173 qset = qset_alloc(whc, mem_flags);
174 if (qset == NULL)
175 return NULL;
176
177 qset->ep = urb->ep;
178 urb->ep->hcpriv = qset;
179 qset_fill_qh(whc, qset, urb);
180 }
181 return qset;
182 }
183
184 void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
185 {
186 qset->remove = 0;
187 list_del_init(&qset->list_node);
188 complete(&qset->remove_complete);
189 }
190
191 /**
192 * qset_add_qtds - add qTDs for an URB to a qset
193 *
194 * Returns true if the list (ASL/PZL) must be updated because (for a
195 * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
196 */
197 enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
198 {
199 struct whc_std *std;
200 enum whc_update update = 0;
201
202 list_for_each_entry(std, &qset->stds, list_node) {
203 struct whc_qtd *qtd;
204 uint32_t status;
205
206 if (qset->ntds >= WHCI_QSET_TD_MAX
207 || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
208 break;
209
210 if (std->qtd)
211 continue; /* already has a qTD */
212
213 qtd = std->qtd = &qset->qtd[qset->td_end];
214
215 /* Fill in setup bytes for control transfers. */
216 if (usb_pipecontrol(std->urb->pipe))
217 memcpy(qtd->setup, std->urb->setup_packet, 8);
218
219 status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);
220
221 if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
222 status |= QTD_STS_LAST_PKT;
223
224 /*
225 * For an IN transfer the iAlt field should be set so
226 * the h/w will automatically advance to the next
227 * transfer. However, if there are 8 or more TDs
228 * remaining in this transfer then iAlt cannot be set
229 * as it could point to somewhere in this transfer.
230 */
231 if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
232 int ialt;
233 ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
234 status |= QTD_STS_IALT(ialt);
235 } else if (usb_pipein(std->urb->pipe))
236 qset->pause_after_urb = std->urb;
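/*
 * Editor's note: a quick example of the iAlt arithmetic above, assuming
 * WHCI_QSET_TD_MAX is 8 (consistent with the "8 or more TDs" comment): with
 * td_end == 5 and ntds_remaining == 3, ialt = (5 + 3) % 8 = 0, i.e. the
 * slot just after the last qTD of this transfer.
 */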
237
238 if (std->num_pointers)
239 qtd->options = cpu_to_le32(QTD_OPT_IOC);
240 else
241 qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
242 qtd->page_list_ptr = cpu_to_le64(std->dma_addr);
243
244 qtd->status = cpu_to_le32(status);
245
246 if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
247 update = WHC_UPDATE_UPDATED;
248
249 if (++qset->td_end >= WHCI_QSET_TD_MAX)
250 qset->td_end = 0;
251 qset->ntds++;
252 }
253
254 return update;
255 }
256
257 /**
258 * qset_remove_qtd - remove the first qTD from a qset.
259 *
260 * The qTD might still be active (if it's part of an IN URB that
261 * resulted in a short read) so ensure it's deactivated.
262 */
263 static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
264 {
265 qset->qtd[qset->td_start].status = 0;
266
267 if (++qset->td_start >= WHCI_QSET_TD_MAX)
268 qset->td_start = 0;
269 qset->ntds--;
270 }
271
272 static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
273 {
274 struct scatterlist *sg;
275 void *bounce;
276 size_t remaining, offset;
277
278 bounce = std->bounce_buf;
279 remaining = std->len;
280
281 sg = std->bounce_sg;
282 offset = std->bounce_offset;
283
284 while (remaining) {
285 size_t len;
286
287 len = min(sg->length - offset, remaining);
288 memcpy(sg_virt(sg) + offset, bounce, len);
289
290 bounce += len;
291 remaining -= len;
292
293 offset += len;
294 if (offset >= sg->length) {
295 sg = sg_next(sg);
296 offset = 0;
297 }
298 }
299
300 }
301
302 /**
303 * qset_free_std - remove an sTD and free it.
304 * @whc: the WHCI host controller
305 * @std: the sTD to remove and free.
306 */
307 void qset_free_std(struct whc *whc, struct whc_std *std)
308 {
309 list_del(&std->list_node);
310 if (std->bounce_buf) {
311 bool is_out = usb_pipeout(std->urb->pipe);
312 dma_addr_t dma_addr;
313
314 if (std->num_pointers)
315 dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
316 else
317 dma_addr = std->dma_addr;
318
319 dma_unmap_single(whc->wusbhc.dev, dma_addr,
320 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
321 if (!is_out)
322 qset_copy_bounce_to_sg(whc, std);
323 kfree(std->bounce_buf);
324 }
325 if (std->pl_virt) {
326 if (std->dma_addr)
327 dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
328 std->num_pointers * sizeof(struct whc_page_list_entry),
329 DMA_TO_DEVICE);
330 kfree(std->pl_virt);
331 std->pl_virt = NULL;
332 }
333 kfree(std);
334 }
335
336 /**
337 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
338 */
339 static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
340 struct urb *urb)
341 {
342 struct whc_std *std, *t;
343
344 list_for_each_entry_safe(std, t, &qset->stds, list_node) {
345 if (std->urb != urb)
346 break;
347 if (std->qtd != NULL)
348 qset_remove_qtd(whc, qset);
349 qset_free_std(whc, std);
350 }
351 }
352
353 /**
354 * qset_free_stds - free any remaining sTDs for an URB.
355 */
356 static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
357 {
358 struct whc_std *std, *t;
359
360 list_for_each_entry_safe(std, t, &qset->stds, list_node) {
361 if (std->urb == urb)
362 qset_free_std(qset->whc, std);
363 }
364 }
365
366 static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
367 {
368 dma_addr_t dma_addr = std->dma_addr;
369 dma_addr_t sp, ep;
370 size_t pl_len;
371 int p;
372
373 /* Short buffers don't need a page list. */
374 if (std->len <= WHCI_PAGE_SIZE) {
375 std->num_pointers = 0;
376 return 0;
377 }
378
379 sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
380 ep = dma_addr + std->len;
381 std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
382
383 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
384 std->pl_virt = kmalloc(pl_len, mem_flags);
385 if (std->pl_virt == NULL)
386 return -ENOMEM;
387 std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
388 if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
389 kfree(std->pl_virt);
390 return -EFAULT;
391 }
392
393 for (p = 0; p < std->num_pointers; p++) {
394 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
395 dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
396 }
397
398 return 0;
399 }
400
401 /**
402 * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system.
403 */
404 static void urb_dequeue_work(struct work_struct *work)
405 {
406 struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
407 struct whc_qset *qset = wurb->qset;
408 struct whc *whc = qset->whc;
409 unsigned long flags;
410
411 if (wurb->is_async)
412 asl_update(whc, WUSBCMD_ASYNC_UPDATED
413 | WUSBCMD_ASYNC_SYNCED_DB
414 | WUSBCMD_ASYNC_QSET_RM);
415 else
416 pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
417 | WUSBCMD_PERIODIC_SYNCED_DB
418 | WUSBCMD_PERIODIC_QSET_RM);
419
420 spin_lock_irqsave(&whc->lock, flags);
421 qset_remove_urb(whc, qset, wurb->urb, wurb->status);
422 spin_unlock_irqrestore(&whc->lock, flags);
423 }
424
425 static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
426 struct urb *urb, gfp_t mem_flags)
427 {
428 struct whc_std *std;
429
430 std = kzalloc(sizeof(struct whc_std), mem_flags);
431 if (std == NULL)
432 return NULL;
433
434 std->urb = urb;
435 std->qtd = NULL;
436
437 INIT_LIST_HEAD(&std->list_node);
438 list_add_tail(&std->list_node, &qset->stds);
439
440 return std;
441 }
442
443 static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
444 gfp_t mem_flags)
445 {
446 size_t remaining;
447 struct scatterlist *sg;
448 int i;
449 int ntds = 0;
450 struct whc_std *std = NULL;
451 struct whc_page_list_entry *new_pl_virt;
452 dma_addr_t prev_end = 0;
453 size_t pl_len;
454 int p = 0;
455
456 remaining = urb->transfer_buffer_length;
457
458 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
459 dma_addr_t dma_addr;
460 size_t dma_remaining;
461 dma_addr_t sp, ep;
462 int num_pointers;
463
464 if (remaining == 0) {
465 break;
466 }
467
468 dma_addr = sg_dma_address(sg);
469 dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);
470
471 while (dma_remaining) {
472 size_t dma_len;
473
474 /*
475 * We can use the previous std (if it exists) provided that:
476 * - the previous one ended on a page boundary.
477 * - the current one begins on a page boundary.
478 * - the previous one isn't full.
479 *
480 * If a new std is needed but the previous one
481 * was not a whole number of packets then this
482 * sg list cannot be mapped onto multiple
483 * qTDs. Return an error and let the caller
484 * sort it out.
485 */
486 if (!std
487 || (prev_end & (WHCI_PAGE_SIZE-1))
488 || (dma_addr & (WHCI_PAGE_SIZE-1))
489 || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
490 if (std && std->len % qset->max_packet != 0)
491 return -EINVAL;
492 std = qset_new_std(whc, qset, urb, mem_flags);
493 if (std == NULL) {
494 return -ENOMEM;
495 }
496 ntds++;
497 p = 0;
498 }
499
500 dma_len = dma_remaining;
501
502 /*
503 * If the remainder of this element doesn't
504 * fit in a single qTD, limit the qTD to a
505 * whole number of packets. This allows the
506 * remainder to go into the next qTD.
507 */
508 if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
509 dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
510 * qset->max_packet - std->len;
511 }
512
513 std->len += dma_len;
514 std->ntds_remaining = -1; /* filled in later */
515
516 sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
517 ep = dma_addr + dma_len;
518 num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
519 std->num_pointers += num_pointers;
520
521 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
522
523 new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
524 if (new_pl_virt == NULL) {
525 kfree(std->pl_virt);
526 std->pl_virt = NULL;
527 return -ENOMEM;
528 }
529 std->pl_virt = new_pl_virt;
530
531 for (;p < std->num_pointers; p++) {
532 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
533 dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
534 }
535
536 prev_end = dma_addr = ep;
537 dma_remaining -= dma_len;
538 remaining -= dma_len;
539 }
540 }
541
542 /* Now the number of sTDs is known, go back and fill in
543 std->ntds_remaining. */
544 list_for_each_entry(std, &qset->stds, list_node) {
545 if (std->ntds_remaining == -1) {
546 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
547 std->ntds_remaining = ntds--;
548 std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
549 pl_len, DMA_TO_DEVICE);
550 }
551 }
552 return 0;
553 }
554
555 /**
556 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
557 *
558 * If the URB contains an sg list whose elements cannot be directly
559 * mapped to qTDs then the data must be transferred via bounce
560 * buffers.
561 */
562 static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
563 struct urb *urb, gfp_t mem_flags)
564 {
565 bool is_out = usb_pipeout(urb->pipe);
566 size_t max_std_len;
567 size_t remaining;
568 int ntds = 0;
569 struct whc_std *std = NULL;
570 void *bounce = NULL;
571 struct scatterlist *sg;
572 int i;
573
574 /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */
575 max_std_len = qset->max_burst * qset->max_packet;
576
577 remaining = urb->transfer_buffer_length;
578
579 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
580 size_t len;
581 size_t sg_remaining;
582 void *orig;
583
584 if (remaining == 0) {
585 break;
586 }
587
588 sg_remaining = min_t(size_t, remaining, sg->length);
589 orig = sg_virt(sg);
590
591 while (sg_remaining) {
592 if (!std || std->len == max_std_len) {
593 std = qset_new_std(whc, qset, urb, mem_flags);
594 if (std == NULL)
595 return -ENOMEM;
596 std->bounce_buf = kmalloc(max_std_len, mem_flags);
597 if (std->bounce_buf == NULL)
598 return -ENOMEM;
599 std->bounce_sg = sg;
600 std->bounce_offset = orig - sg_virt(sg);
601 bounce = std->bounce_buf;
602 ntds++;
603 }
604
605 len = min(sg_remaining, max_std_len - std->len);
606
607 if (is_out)
608 memcpy(bounce, orig, len);
609
610 std->len += len;
611 std->ntds_remaining = -1; /* filled in later */
612
613 bounce += len;
614 orig += len;
615 sg_remaining -= len;
616 remaining -= len;
617 }
618 }
619
620 /*
621 * For each of the new sTDs, map the bounce buffers, create
622 * page lists (if necessary), and fill in std->ntds_remaining.
623 */
624 list_for_each_entry(std, &qset->stds, list_node) {
625 if (std->ntds_remaining != -1)
626 continue;
627
628 std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
629 is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
630
631 if (qset_fill_page_list(whc, std, mem_flags) < 0)
632 return -ENOMEM;
633
634 std->ntds_remaining = ntds--;
635 }
636
637 return 0;
638 }
639
640 /**
641 * qset_add_urb - add an urb to the qset's queue.
642 *
643 * The URB is chopped into sTDs, one for each qTD that will be required.
644 * At least one qTD (and sTD) is required even if the transfer has no
645 * data (e.g., for some control transfers).
646 */
647 int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
648 gfp_t mem_flags)
649 {
650 struct whc_urb *wurb;
651 int remaining = urb->transfer_buffer_length;
652 u64 transfer_dma = urb->transfer_dma;
653 int ntds_remaining;
654 int ret;
655
656 wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
657 if (wurb == NULL)
658 goto err_no_mem;
659 urb->hcpriv = wurb;
660 wurb->qset = qset;
661 wurb->urb = urb;
662 INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
663
664 if (urb->num_sgs) {
665 ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
666 if (ret == -EINVAL) {
667 qset_free_stds(qset, urb);
668 ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
669 }
670 if (ret < 0)
671 goto err_no_mem;
672 return 0;
673 }
674
675 ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
676 if (ntds_remaining == 0)
677 ntds_remaining = 1;
678
679 while (ntds_remaining) {
680 struct whc_std *std;
681 size_t std_len;
682
683 std_len = remaining;
684 if (std_len > QTD_MAX_XFER_SIZE)
685 std_len = QTD_MAX_XFER_SIZE;
686
687 std = qset_new_std(whc, qset, urb, mem_flags);
688 if (std == NULL)
689 goto err_no_mem;
690
691 std->dma_addr = transfer_dma;
692 std->len = std_len;
693 std->ntds_remaining = ntds_remaining;
694
695 if (qset_fill_page_list(whc, std, mem_flags) < 0)
696 goto err_no_mem;
697
698 ntds_remaining--;
699 remaining -= std_len;
700 transfer_dma += std_len;
701 }
702
703 return 0;
704
705 err_no_mem:
706 qset_free_stds(qset, urb);
707 return -ENOMEM;
708 }
709
710 /**
711 * qset_remove_urb - remove an URB from the urb queue.
712 *
713 * The URB is returned to the USB subsystem.
714 */
715 void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
716 struct urb *urb, int status)
717 {
718 struct wusbhc *wusbhc = &whc->wusbhc;
719 struct whc_urb *wurb = urb->hcpriv;
720
721 usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
722 /* Drop the lock as urb->complete() may enqueue another urb. */
723 spin_unlock(&whc->lock);
724 wusbhc_giveback_urb(wusbhc, urb, status);
725 spin_lock(&whc->lock);
726
727 kfree(wurb);
728 }
729
730 /**
731 * get_urb_status_from_qtd - get the completed urb status from qTD status
732 * @urb: completed urb
733 * @status: qTD status
734 */
735 static int get_urb_status_from_qtd(struct urb *urb, u32 status)
736 {
737 if (status & QTD_STS_HALTED) {
738 if (status & QTD_STS_DBE)
739 return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
740 else if (status & QTD_STS_BABBLE)
741 return -EOVERFLOW;
742 else if (status & QTD_STS_RCE)
743 return -ETIME;
744 return -EPIPE;
745 }
746 if (usb_pipein(urb->pipe)
747 && (urb->transfer_flags & URB_SHORT_NOT_OK)
748 && urb->actual_length < urb->transfer_buffer_length)
749 return -EREMOTEIO;
750 return 0;
751 }
752
753 /**
754 * process_inactive_qtd - process an inactive (but not halted) qTD.
755 *
756 * Update the urb with the transfer bytes from the qTD. If the urb is
757 * completely transferred or (in the case of an IN only) the LPF is
758 * set, then the transfer is complete and the urb should be returned
759 * to the system.
760 */
761 void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
762 struct whc_qtd *qtd)
763 {
764 struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
765 struct urb *urb = std->urb;
766 uint32_t status;
767 bool complete;
768
769 status = le32_to_cpu(qtd->status);
770
771 urb->actual_length += std->len - QTD_STS_TO_LEN(status);
772
773 if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
774 complete = true;
775 else
776 complete = whc_std_last(std);
777
778 qset_remove_qtd(whc, qset);
779 qset_free_std(whc, std);
780
781 /*
782 * Transfers for this URB are complete? Then return it to the
783 * USB subsystem.
784 */
785 if (complete) {
786 qset_remove_qtds(whc, qset, urb);
787 qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));
788
789 /*
790 * If iAlt isn't valid then the hardware didn't
791 * advance iCur. Adjust the start and end pointers to
792 * match iCur.
793 */
794 if (!(status & QTD_STS_IALT_VALID))
795 qset->td_start = qset->td_end
796 = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
797 qset->pause_after_urb = NULL;
798 }
799 }
800
801 /**
802 * process_halted_qtd - process a qset with a halted qtd
803 *
804 * Remove all the qTDs for the failed URB and return the failed URB to
805 * the USB subsystem. Then remove all other qTDs so the qset can be
806 * removed.
807 *
808 * FIXME: this is the point where rate adaptation can be done. If a
809 * transfer failed because it exceeded the maximum number of retries
810 * then it could be reactivated with a slower rate without having to
811 * remove the qset.
812 */
813 void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
814 struct whc_qtd *qtd)
815 {
816 struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
817 struct urb *urb = std->urb;
818 int urb_status;
819
820 urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));
821
822 qset_remove_qtds(whc, qset, urb);
823 qset_remove_urb(whc, qset, urb, urb_status);
824
825 list_for_each_entry(std, &qset->stds, list_node) {
826 if (qset->ntds == 0)
827 break;
828 qset_remove_qtd(whc, qset);
829 std->qtd = NULL;
830 }
831
832 qset->remove = 1;
833 }
834
835 void qset_free(struct whc *whc, struct whc_qset *qset)
836 {
837 dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
838 }
839
840 /**
841 * qset_delete - wait for a qset to be unused, then free it.
842 */
843 void qset_delete(struct whc *whc, struct whc_qset *qset)
844 {
845 wait_for_completion(&qset->remove_complete);
846 qset_free(whc, qset);
847 }
848
849 #line 9 "/home/cluser/ldv/ref_launch/work/current--X--drivers--X--defaultlinux-4.5-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.5-rc1.tar.xz/csd_deg_dscv/2256/dscv_tempdir/dscv/ri/331_1a/drivers/usb/host/whci/qset.o.c.prepared"
1
2 #include <linux/types.h>
3 #include <linux/dma-direction.h>
4 #include <verifier/rcv.h>
5 #include <verifier/set.h>
6 #include <verifier/map.h>
7
8 Set LDV_DMA_MAP_CALLS;
9
10 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_page') maps page */
11 dma_addr_t ldv_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir) {
12 dma_addr_t nonedetermined;
13
14 nonedetermined = ldv_undef_ptr();
15
16 /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
17 ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
18
19 ldv_set_add(LDV_DMA_MAP_CALLS, nonedetermined);
20
21 return nonedetermined;
22 }
23
24 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_mapping_error') unmaps page */
25 int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) {
26
27 /* LDV_COMMENT_ASSERT The dma_addr being checked must come from an as-yet-unchecked dma_mapping call */
28 ldv_assert(ldv_set_contains(LDV_DMA_MAP_CALLS, dma_addr));
29 ldv_set_remove(LDV_DMA_MAP_CALLS, dma_addr);
30
31 int nonedetermined;
32
33 nonedetermined = ldv_undef_int();
34
35 return nonedetermined;
36 }
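/*
 * Editor's note: the models in this file encode the rule that every
 * dma_map_*() call must be checked with dma_mapping_error() before the next
 * mapping is made (and before the module exits). A minimal sketch of the
 * pattern that keeps LDV_DMA_MAP_CALLS empty; dev, buf and len are
 * placeholder names, not taken from the driver:
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	... use addr, then dma_unmap_single(dev, addr, len, DMA_TO_DEVICE); ...
 */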
37
38
39
40 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_single') maps pci_dma */
41 dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir) {
42 dma_addr_t nonedetermined;
43
44 nonedetermined = ldv_undef_ptr();
45
46 /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
47 ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
48
49 ldv_set_add(LDV_DMA_MAP_CALLS, nonedetermined);
50
51 return nonedetermined;
52 }
53
54 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_single_attrs') maps pci_dma */
55 dma_addr_t ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) {
56 dma_addr_t nonedetermined;
57
58 nonedetermined = ldv_undef_ptr();
59
60 /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
61 ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
62
63 ldv_set_add(LDV_DMA_MAP_CALLS, nonedetermined);
64
65 return nonedetermined;
66 }
67
68 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_initialize') Initialize all module reference counters at the beginning */
69 void ldv_initialize(void) {
70 /* LDV_COMMENT_CHANGE_STATE The set of unchecked DMA mappings is empty at the beginning */
71 ldv_set_init(LDV_DMA_MAP_CALLS);
72 }
73
74 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all module reference counters have their initial values at the end */
75 void ldv_check_final_state(void) {
76 /* LDV_COMMENT_ASSERT Every DMA mapping should have been checked with dma_mapping_error before module unloading */
77 ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
78 }
1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because some static verifiers (like
9 BLAST) don't accept multiple error labels in a program. */
10 static inline void ldv_error(void)
11 {
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop, that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21 LDV_STOP: goto LDV_STOP;
22 }
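/*
 * Editor's note: a short illustration of how these helpers are used by the
 * model code, with made-up values: ldv_assume() prunes paths the verifier
 * should ignore, ldv_assert() reports a violation when its condition can be
 * false:
 *
 *	int x = ldv_undef_int();
 *	ldv_assume(x > 0);     paths with x <= 0 are not explored
 *	ldv_assert(x != 0);    never triggers here, since x > 0 was assumed
 */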
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return nondeterministic negative integer number. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 int ret = ldv_undef_int();
33
34 ldv_assume(ret < 0);
35
36 return ret;
37 }
38 /* Return nondeterministic nonpositive integer number. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 int ret = ldv_undef_int();
42
43 ldv_assume(ret <= 0);
44
45 return ret;
46 }
47
48 /* Add explicit model for __builin_expect GCC function. Without the model a
49 return value will be treated as nondetermined by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62 ldv_assert(0);
63 }
64
65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */
1 #ifndef __LINUX_COMPILER_H
2 #define __LINUX_COMPILER_H
3
4 #ifndef __ASSEMBLY__
5
6 #ifdef __CHECKER__
7 # define __user __attribute__((noderef, address_space(1)))
8 # define __kernel __attribute__((address_space(0)))
9 # define __safe __attribute__((safe))
10 # define __force __attribute__((force))
11 # define __nocast __attribute__((nocast))
12 # define __iomem __attribute__((noderef, address_space(2)))
13 # define __must_hold(x) __attribute__((context(x,1,1)))
14 # define __acquires(x) __attribute__((context(x,0,1)))
15 # define __releases(x) __attribute__((context(x,1,0)))
16 # define __acquire(x) __context__(x,1)
17 # define __release(x) __context__(x,-1)
18 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
19 # define __percpu __attribute__((noderef, address_space(3)))
20 # define __pmem __attribute__((noderef, address_space(5)))
21 #ifdef CONFIG_SPARSE_RCU_POINTER
22 # define __rcu __attribute__((noderef, address_space(4)))
23 #else
24 # define __rcu
25 #endif
26 extern void __chk_user_ptr(const volatile void __user *);
27 extern void __chk_io_ptr(const volatile void __iomem *);
28 #else
29 # define __user
30 # define __kernel
31 # define __safe
32 # define __force
33 # define __nocast
34 # define __iomem
35 # define __chk_user_ptr(x) (void)0
36 # define __chk_io_ptr(x) (void)0
37 # define __builtin_warning(x, y...) (1)
38 # define __must_hold(x)
39 # define __acquires(x)
40 # define __releases(x)
41 # define __acquire(x) (void)0
42 # define __release(x) (void)0
43 # define __cond_lock(x,c) (c)
44 # define __percpu
45 # define __rcu
46 # define __pmem
47 #endif
48
49 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
50 #define ___PASTE(a,b) a##b
51 #define __PASTE(a,b) ___PASTE(a,b)
52
53 #ifdef __KERNEL__
54
55 #ifdef __GNUC__
56 #include <linux/compiler-gcc.h>
57 #endif
58
59 #if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
60 #define notrace __attribute__((hotpatch(0,0)))
61 #else
62 #define notrace __attribute__((no_instrument_function))
63 #endif
64
65 /* Intel compiler defines __GNUC__. So we will overwrite implementations
66 * coming from above header files here
67 */
68 #ifdef __INTEL_COMPILER
69 # include <linux/compiler-intel.h>
70 #endif
71
72 /* Clang compiler defines __GNUC__. So we will overwrite implementations
73 * coming from above header files here
74 */
75 #ifdef __clang__
76 #include <linux/compiler-clang.h>
77 #endif
78
79 /*
80 * Generic compiler-dependent macros required for kernel
81 * build go below this comment. Actual compiler/compiler version
82 * specific implementations come from the above header files
83 */
84
85 struct ftrace_branch_data {
86 const char *func;
87 const char *file;
88 unsigned line;
89 union {
90 struct {
91 unsigned long correct;
92 unsigned long incorrect;
93 };
94 struct {
95 unsigned long miss;
96 unsigned long hit;
97 };
98 unsigned long miss_hit[2];
99 };
100 };
101
102 /*
103 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
104 * to disable branch tracing on a per file basis.
105 */
106 #if defined(CONFIG_TRACE_BRANCH_PROFILING) \
107 && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
108 void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
109
110 #define likely_notrace(x) __builtin_expect(!!(x), 1)
111 #define unlikely_notrace(x) __builtin_expect(!!(x), 0)
112
113 #define __branch_check__(x, expect) ({ \
114 int ______r; \
115 static struct ftrace_branch_data \
116 __attribute__((__aligned__(4))) \
117 __attribute__((section("_ftrace_annotated_branch"))) \
118 ______f = { \
119 .func = __func__, \
120 .file = __FILE__, \
121 .line = __LINE__, \
122 }; \
123 ______r = likely_notrace(x); \
124 ftrace_likely_update(&______f, ______r, expect); \
125 ______r; \
126 })
127
128 /*
129 * Using __builtin_constant_p(x) to ignore cases where the return
130 * value is always the same. This idea is taken from a similar patch
131 * written by Daniel Walker.
132 */
133 # ifndef likely
134 # define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
135 # endif
136 # ifndef unlikely
137 # define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
138 # endif
139
140 #ifdef CONFIG_PROFILE_ALL_BRANCHES
141 /*
142 * "Define 'is'", Bill Clinton
143 * "Define 'if'", Steven Rostedt
144 */
145 #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
146 #define __trace_if(cond) \
147 if (__builtin_constant_p((cond)) ? !!(cond) : \
148 ({ \
149 int ______r; \
150 static struct ftrace_branch_data \
151 __attribute__((__aligned__(4))) \
152 __attribute__((section("_ftrace_branch"))) \
153 ______f = { \
154 .func = __func__, \
155 .file = __FILE__, \
156 .line = __LINE__, \
157 }; \
158 ______r = !!(cond); \
159 ______f.miss_hit[______r]++; \
160 ______r; \
161 }))
162 #endif /* CONFIG_PROFILE_ALL_BRANCHES */
163
164 #else
165 # define likely(x) __builtin_expect(!!(x), 1)
166 # define unlikely(x) __builtin_expect(!!(x), 0)
167 #endif
168
169 /* Optimization barrier */
170 #ifndef barrier
171 # define barrier() __memory_barrier()
172 #endif
173
174 #ifndef barrier_data
175 # define barrier_data(ptr) barrier()
176 #endif
177
178 /* Unreachable code */
179 #ifndef unreachable
180 # define unreachable() do { } while (1)
181 #endif
182
183 #ifndef RELOC_HIDE
184 # define RELOC_HIDE(ptr, off) \
185 ({ unsigned long __ptr; \
186 __ptr = (unsigned long) (ptr); \
187 (typeof(ptr)) (__ptr + (off)); })
188 #endif
189
190 #ifndef OPTIMIZER_HIDE_VAR
191 #define OPTIMIZER_HIDE_VAR(var) barrier()
192 #endif
193
194 /* Not-quite-unique ID. */
195 #ifndef __UNIQUE_ID
196 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
197 #endif
198
199 #include <uapi/linux/types.h>
200
201 #define __READ_ONCE_SIZE \
202 ({ \
203 switch (size) { \
204 case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
205 case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
206 case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
207 case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
208 default: \
209 barrier(); \
210 __builtin_memcpy((void *)res, (const void *)p, size); \
211 barrier(); \
212 } \
213 })
214
215 static __always_inline
216 void __read_once_size(const volatile void *p, void *res, int size)
217 {
218 __READ_ONCE_SIZE;
219 }
220
221 #ifdef CONFIG_KASAN
222 /*
223 * This function is not 'inline' because __no_sanitize_address conflicts
224 * with inlining. Attempt to inline it may cause a build failure.
225 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
226 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
227 */
228 static __no_sanitize_address __maybe_unused
229 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
230 {
231 __READ_ONCE_SIZE;
232 }
233 #else
234 static __always_inline
235 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
236 {
237 __READ_ONCE_SIZE;
238 }
239 #endif
240
241 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
242 {
243 switch (size) {
244 case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
245 case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
246 case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
247 case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
248 default:
249 barrier();
250 __builtin_memcpy((void *)p, (const void *)res, size);
251 barrier();
252 }
253 }
254
255 /*
256 * Prevent the compiler from merging or refetching reads or writes. The
257 * compiler is also forbidden from reordering successive instances of
258 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
259 * compiler is aware of some particular ordering. One way to make the
260 * compiler aware of ordering is to put the two invocations of READ_ONCE,
261 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
262 *
263 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
264 * data types like structs or unions. If the size of the accessed data
265 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
266 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
267 * compile-time warning.
268 *
269 * Their two major use cases are: (1) Mediating communication between
270 * process-level code and irq/NMI handlers, all running on the same CPU,
271 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
272 * mutilate accesses that either do not require ordering or that interact
273 * with an explicit memory barrier or atomic instruction that provides the
274 * required ordering.
275 */
276
277 #define __READ_ONCE(x, check) \
278 ({ \
279 union { typeof(x) __val; char __c[1]; } __u; \
280 if (check) \
281 __read_once_size(&(x), __u.__c, sizeof(x)); \
282 else \
283 __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
284 __u.__val; \
285 })
286 #define READ_ONCE(x) __READ_ONCE(x, 1)
287
288 /*
289 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
290 * to hide memory access from KASAN.
291 */
292 #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
293
294 #define WRITE_ONCE(x, val) \
295 ({ \
296 union { typeof(x) __val; char __c[1]; } __u = \
297 { .__val = (__force typeof(x)) (val) }; \
298 __write_once_size(&(x), __u.__c, sizeof(x)); \
299 __u.__val; \
300 })
301
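/*
 * Editor's note: a minimal sketch of the first use case described above
 * (mediating a flag between process-level code and an interrupt handler).
 * The names irq_seen and my_handler are invented for illustration:
 *
 *	static int irq_seen;
 *
 *	static irqreturn_t my_handler(int irq, void *data)
 *	{
 *		WRITE_ONCE(irq_seen, 1);
 *		return IRQ_HANDLED;
 *	}
 *
 *	while (!READ_ONCE(irq_seen))	 in process context
 *		cpu_relax();
 */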
302 /**
303 * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
304 * @cond: boolean expression to wait for
305 *
306 * Equivalent to using smp_load_acquire() on the condition variable but employs
307 * the control dependency of the wait to reduce the barrier on many platforms.
308 *
309 * The control dependency provides a LOAD->STORE order, the additional RMB
310 * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
311 * aka. ACQUIRE.
312 */
313 #define smp_cond_acquire(cond) do { \
314 while (!(cond)) \
315 cpu_relax(); \
316 smp_rmb(); /* ctrl + rmb := acquire */ \
317 } while (0)
318
319 #endif /* __KERNEL__ */
320
321 #endif /* __ASSEMBLY__ */
322
323 #ifdef __KERNEL__
324 /*
325 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
326 * warning for each use, in hopes of speeding the functions removal.
327 * Usage is:
328 * int __deprecated foo(void)
329 */
330 #ifndef __deprecated
331 # define __deprecated /* unimplemented */
332 #endif
333
334 #ifdef MODULE
335 #define __deprecated_for_modules __deprecated
336 #else
337 #define __deprecated_for_modules
338 #endif
339
340 #ifndef __must_check
341 #define __must_check
342 #endif
343
344 #ifndef CONFIG_ENABLE_MUST_CHECK
345 #undef __must_check
346 #define __must_check
347 #endif
348 #ifndef CONFIG_ENABLE_WARN_DEPRECATED
349 #undef __deprecated
350 #undef __deprecated_for_modules
351 #define __deprecated
352 #define __deprecated_for_modules
353 #endif
354
355 /*
356 * Allow us to avoid 'defined but not used' warnings on functions and data,
357 * as well as force them to be emitted to the assembly file.
358 *
359 * As of gcc 3.4, static functions that are not marked with attribute((used))
360 * may be elided from the assembly file. As of gcc 3.4, static data not so
361 * marked will not be elided, but this may change in a future gcc version.
362 *
363 * NOTE: Because distributions shipped with a backported unit-at-a-time
364 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
365 * for gcc >=3.3 instead of 3.4.
366 *
367 * In prior versions of gcc, such functions and data would be emitted, but
368 * would be warned about except with attribute((unused)).
369 *
370 * Mark functions that are referenced only in inline assembly as __used so
371 * the code is emitted even though it appears to be unreferenced.
372 */
373 #ifndef __used
374 # define __used /* unimplemented */
375 #endif
376
377 #ifndef __maybe_unused
378 # define __maybe_unused /* unimplemented */
379 #endif
380
381 #ifndef __always_unused
382 # define __always_unused /* unimplemented */
383 #endif
384
385 #ifndef noinline
386 #define noinline
387 #endif
388
389 /*
390 * Rather than using noinline to prevent stack consumption, use
391 * noinline_for_stack instead. For documentation reasons.
392 */
393 #define noinline_for_stack noinline
394
395 #ifndef __always_inline
396 #define __always_inline inline
397 #endif
398
399 #endif /* __KERNEL__ */
400
401 /*
402 * From the GCC manual:
403 *
404 * Many functions do not examine any values except their arguments,
405 * and have no effects except the return value. Basically this is
406 * just slightly more strict class than the `pure' attribute above,
407 * since function is not allowed to read global memory.
408 *
409 * Note that a function that has pointer arguments and examines the
410 * data pointed to must _not_ be declared `const'. Likewise, a
411 * function that calls a non-`const' function usually must not be
412 * `const'. It does not make sense for a `const' function to return
413 * `void'.
414 */
415 #ifndef __attribute_const__
416 # define __attribute_const__ /* unimplemented */
417 #endif
418
419 /*
420 * Tell gcc if a function is cold. The compiler will assume any path
421 * directly leading to the call is unlikely.
422 */
423
424 #ifndef __cold
425 #define __cold
426 #endif
427
428 /* Simple shorthand for a section definition */
429 #ifndef __section
430 # define __section(S) __attribute__ ((__section__(#S)))
431 #endif
432
433 #ifndef __visible
434 #define __visible
435 #endif
436
437 /*
438 * Assume alignment of return value.
439 */
440 #ifndef __assume_aligned
441 #define __assume_aligned(a, ...)
442 #endif
443
444
445 /* Are two types/vars the same type (ignoring qualifiers)? */
446 #ifndef __same_type
447 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
448 #endif
449
450 /* Is this type a native word size -- useful for atomic operations */
451 #ifndef __native_word
452 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
453 #endif
454
455 /* Compile time object size, -1 for unknown */
456 #ifndef __compiletime_object_size
457 # define __compiletime_object_size(obj) -1
458 #endif
459 #ifndef __compiletime_warning
460 # define __compiletime_warning(message)
461 #endif
462 #ifndef __compiletime_error
463 # define __compiletime_error(message)
464 /*
465 * Sparse complains of variable sized arrays due to the temporary variable in
466 * __compiletime_assert. Unfortunately we can't just expand it out to make
467 * sparse see a constant array size without breaking compiletime_assert on old
468 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
469 */
470 # ifndef __CHECKER__
471 # define __compiletime_error_fallback(condition) \
472 do { } while (0)
473 # endif
474 #endif
475 #ifndef __compiletime_error_fallback
476 # define __compiletime_error_fallback(condition) do { } while (0)
477 #endif
478
479 #define __compiletime_assert(condition, msg, prefix, suffix) \
480 do { \
481 bool __cond = !(condition); \
482 extern void prefix ## suffix(void) __compiletime_error(msg); \
483 if (__cond) \
484 prefix ## suffix(); \
485 __compiletime_error_fallback(__cond); \
486 } while (0)
487
488 #define _compiletime_assert(condition, msg, prefix, suffix) \
489 __compiletime_assert(condition, msg, prefix, suffix)
490
491 /**
492 * compiletime_assert - break build and emit msg if condition is false
493 * @condition: a compile-time constant condition to check
494 * @msg: a message to emit if condition is false
495 *
496 * In the tradition of POSIX assert, this macro will break the build if the
497 * supplied condition is *false*, emitting the supplied error message if the
498 * compiler has support to do so.
499 */
500 #define compiletime_assert(condition, msg) \
501 _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
502
503 #define compiletime_assert_atomic_type(t) \
504 compiletime_assert(__native_word(t), \
505 "Need native word sized stores/loads for atomicity.")
506
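/*
 * Illustrative sketch, not taken from this header: compiletime_assert() can
 * pin down build-time assumptions. The condition below is an arbitrary
 * example, not a check this header performs.
 *
 *	compiletime_assert(sizeof(long) >= sizeof(int),
 *			   "long must be at least as wide as int");
 *	compiletime_assert_atomic_type(unsigned long);
 *
 * When the condition is a compile-time constant that evaluates to false, the
 * call to the undefined __compiletime_assert_<line>() function fails the
 * build with the supplied message (where the compiler supports it);
 * otherwise the reference is optimized away.
 */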
507 /*
508 * Prevent the compiler from merging or refetching accesses. The compiler
509 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
510 * but only when the compiler is aware of some particular ordering. One way
511 * to make the compiler aware of ordering is to put the two invocations of
512 * ACCESS_ONCE() in different C statements.
513 *
514 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
515 * on a union member will work as long as the size of the member matches the
516 * size of the union and the size is smaller than word size.
517 *
518 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
519 * between process-level code and irq/NMI handlers, all running on the same CPU,
520 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
521 * mutilate accesses that either do not require ordering or that interact
522 * with an explicit memory barrier or atomic instruction that provides the
523 * required ordering.
524 *
525 * If possible use READ_ONCE()/WRITE_ONCE() instead.
526 */
527 #define __ACCESS_ONCE(x) ({ \
528 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
529 (volatile typeof(x) *)&(x); })
530 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
531
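/*
 * Illustrative sketch, not taken from this header: a typical ACCESS_ONCE()
 * use is polling a flag updated by another context. "stop_requested" is a
 * hypothetical shared int.
 *
 *	while (!ACCESS_ONCE(stop_requested))
 *		cpu_relax();
 *
 * The volatile cast forces the compiler to reload stop_requested on every
 * iteration instead of hoisting the load out of the loop.
 */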
532 /**
533 * lockless_dereference() - safely load a pointer for later dereference
534 * @p: The pointer to load
535 *
536 * Similar to rcu_dereference(), but for situations where the pointed-to
537 * object's lifetime is managed by something other than RCU. That
538 * "something other" might be reference counting or simple immortality.
539 */
540 #define lockless_dereference(p) \
541 ({ \
542 typeof(p) _________p1 = READ_ONCE(p); \
543 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
544 (_________p1); \
545 })
546
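/*
 * Illustrative sketch, not taken from this header: loading a pointer that
 * was published with an smp_store_release()-style store. "global_cfg" and
 * "struct my_cfg" are hypothetical.
 *
 *	struct my_cfg *cfg = lockless_dereference(global_cfg);
 *	if (cfg)
 *		pr_info("limit=%d\n", cfg->limit);
 *
 * The dependency barrier orders the pointer load before later loads through
 * that pointer on architectures (such as Alpha) that need it.
 */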
547 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
548 #ifdef CONFIG_KPROBES
549 # define __kprobes __attribute__((__section__(".kprobes.text")))
550 # define nokprobe_inline __always_inline
551 #else
552 # define __kprobes
553 # define nokprobe_inline inline
554 #endif
555 #endif /* __LINUX_COMPILER_H */
1 #ifndef _LINUX_LIST_H
2 #define _LINUX_LIST_H
3
4 #include <linux/types.h>
5 #include <linux/stddef.h>
6 #include <linux/poison.h>
7 #include <linux/const.h>
8 #include <linux/kernel.h>
9
10 /*
11 * Simple doubly linked list implementation.
12 *
13 * Some of the internal functions ("__xxx") are useful when
14 * manipulating whole lists rather than single entries, as
15 * sometimes we already know the next/prev entries and we can
16 * generate better code by using them directly rather than
17 * using the generic single-entry routines.
18 */
19
20 #define LIST_HEAD_INIT(name) { &(name), &(name) }
21
22 #define LIST_HEAD(name) \
23 struct list_head name = LIST_HEAD_INIT(name)
24
25 static inline void INIT_LIST_HEAD(struct list_head *list)
26 {
27 WRITE_ONCE(list->next, list);
28 list->prev = list;
29 }
30
31 /*
32 * Insert a new entry between two known consecutive entries.
33 *
34 * This is only for internal list manipulation where we know
35 * the prev/next entries already!
36 */
37 #ifndef CONFIG_DEBUG_LIST
38 static inline void __list_add(struct list_head *new,
39 struct list_head *prev,
40 struct list_head *next)
41 {
42 next->prev = new;
43 new->next = next;
44 new->prev = prev;
45 WRITE_ONCE(prev->next, new);
46 }
47 #else
48 extern void __list_add(struct list_head *new,
49 struct list_head *prev,
50 struct list_head *next);
51 #endif
52
53 /**
54 * list_add - add a new entry
55 * @new: new entry to be added
56 * @head: list head to add it after
57 *
58 * Insert a new entry after the specified head.
59 * This is good for implementing stacks.
60 */
61 static inline void list_add(struct list_head *new, struct list_head *head)
62 {
63 __list_add(new, head, head->next);
64 }
65
66
67 /**
68 * list_add_tail - add a new entry
69 * @new: new entry to be added
70 * @head: list head to add it before
71 *
72 * Insert a new entry before the specified head.
73 * This is useful for implementing queues.
74 */
75 static inline void list_add_tail(struct list_head *new, struct list_head *head)
76 {
77 __list_add(new, head->prev, head);
78 }
79
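/*
 * Illustrative sketch, not taken from this header: embedding a list_head in
 * a hypothetical structure and queueing instances in FIFO order.
 *
 *	struct my_item {
 *		int value;
 *		struct list_head node;
 *	};
 *
 *	static LIST_HEAD(my_items);
 *
 *	static void my_item_queue(struct my_item *it)
 *	{
 *		list_add_tail(&it->node, &my_items);
 *	}
 *
 * list_add() on the same head would instead push entries LIFO, like a stack.
 */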
80 /*
81 * Delete a list entry by making the prev/next entries
82 * point to each other.
83 *
84 * This is only for internal list manipulation where we know
85 * the prev/next entries already!
86 */
87 static inline void __list_del(struct list_head * prev, struct list_head * next)
88 {
89 next->prev = prev;
90 WRITE_ONCE(prev->next, next);
91 }
92
93 /**
94 * list_del - deletes entry from list.
95 * @entry: the element to delete from the list.
96 * Note: list_empty() on entry does not return true after this, the entry is
97 * in an undefined state.
98 */
99 #ifndef CONFIG_DEBUG_LIST
100 static inline void __list_del_entry(struct list_head *entry)
101 {
102 __list_del(entry->prev, entry->next);
103 }
104
105 static inline void list_del(struct list_head *entry)
106 {
107 __list_del(entry->prev, entry->next);
108 entry->next = LIST_POISON1;
109 entry->prev = LIST_POISON2;
110 }
111 #else
112 extern void __list_del_entry(struct list_head *entry);
113 extern void list_del(struct list_head *entry);
114 #endif
115
116 #ifdef CONFIG_DEBUG_LIST
117 /*
118 * See devm_memremap_pages() which wants DEBUG_LIST=y to assert if one
119 * of the pages it allocates is ever passed to list_add()
120 */
121 extern void list_force_poison(struct list_head *entry);
122 #else
123 /* fallback to the less strict LIST_POISON* definitions */
124 #define list_force_poison list_del
125 #endif
126
127 /**
128 * list_replace - replace old entry by new one
129 * @old : the element to be replaced
130 * @new : the new element to insert
131 *
132 * If @old was empty, it will be overwritten.
133 */
134 static inline void list_replace(struct list_head *old,
135 struct list_head *new)
136 {
137 new->next = old->next;
138 new->next->prev = new;
139 new->prev = old->prev;
140 new->prev->next = new;
141 }
142
143 static inline void list_replace_init(struct list_head *old,
144 struct list_head *new)
145 {
146 list_replace(old, new);
147 INIT_LIST_HEAD(old);
148 }
149
150 /**
151 * list_del_init - deletes entry from list and reinitialize it.
152 * @entry: the element to delete from the list.
153 */
154 static inline void list_del_init(struct list_head *entry)
155 {
156 __list_del_entry(entry);
157 INIT_LIST_HEAD(entry);
158 }
159
160 /**
161 * list_move - delete from one list and add as another's head
162 * @list: the entry to move
163 * @head: the head that will precede our entry
164 */
165 static inline void list_move(struct list_head *list, struct list_head *head)
166 {
167 __list_del_entry(list);
168 list_add(list, head);
169 }
170
171 /**
172 * list_move_tail - delete from one list and add as another's tail
173 * @list: the entry to move
174 * @head: the head that will follow our entry
175 */
176 static inline void list_move_tail(struct list_head *list,
177 struct list_head *head)
178 {
179 __list_del_entry(list);
180 list_add_tail(list, head);
181 }
182
183 /**
184 * list_is_last - tests whether @list is the last entry in list @head
185 * @list: the entry to test
186 * @head: the head of the list
187 */
188 static inline int list_is_last(const struct list_head *list,
189 const struct list_head *head)
190 {
191 return list->next == head;
192 }
193
194 /**
195 * list_empty - tests whether a list is empty
196 * @head: the list to test.
197 */
198 static inline int list_empty(const struct list_head *head)
199 {
200 return READ_ONCE(head->next) == head;
201 }
202
203 /**
204 * list_empty_careful - tests whether a list is empty and not being modified
205 * @head: the list to test
206 *
207 * Description:
208 * tests whether a list is empty _and_ checks that no other CPU might be
209 * in the process of modifying either member (next or prev)
210 *
211 * NOTE: using list_empty_careful() without synchronization
212 * can only be safe if the only activity that can happen
213 * to the list entry is list_del_init(). Eg. it cannot be used
214 * if another CPU could re-list_add() it.
215 */
216 static inline int list_empty_careful(const struct list_head *head)
217 {
218 struct list_head *next = head->next;
219 return (next == head) && (next == head->prev);
220 }
221
222 /**
223 * list_rotate_left - rotate the list to the left
224 * @head: the head of the list
225 */
226 static inline void list_rotate_left(struct list_head *head)
227 {
228 struct list_head *first;
229
230 if (!list_empty(head)) {
231 first = head->next;
232 list_move_tail(first, head);
233 }
234 }
235
236 /**
237 * list_is_singular - tests whether a list has just one entry.
238 * @head: the list to test.
239 */
240 static inline int list_is_singular(const struct list_head *head)
241 {
242 return !list_empty(head) && (head->next == head->prev);
243 }
244
245 static inline void __list_cut_position(struct list_head *list,
246 struct list_head *head, struct list_head *entry)
247 {
248 struct list_head *new_first = entry->next;
249 list->next = head->next;
250 list->next->prev = list;
251 list->prev = entry;
252 entry->next = list;
253 head->next = new_first;
254 new_first->prev = head;
255 }
256
257 /**
258 * list_cut_position - cut a list into two
259 * @list: a new list to add all removed entries
260 * @head: a list with entries
261 * @entry: an entry within head, could be the head itself
262 * and if so we won't cut the list
263 *
264 * This helper moves the initial part of @head, up to and
265 * including @entry, from @head to @list. You should
266 * pass on @entry an element you know is on @head. @list
267 * should be an empty list or a list you do not care about
268 * losing its data.
269 *
270 */
271 static inline void list_cut_position(struct list_head *list,
272 struct list_head *head, struct list_head *entry)
273 {
274 if (list_empty(head))
275 return;
276 if (list_is_singular(head) &&
277 (head->next != entry && head != entry))
278 return;
279 if (entry == head)
280 INIT_LIST_HEAD(list);
281 else
282 __list_cut_position(list, head, entry);
283 }
284
285 static inline void __list_splice(const struct list_head *list,
286 struct list_head *prev,
287 struct list_head *next)
288 {
289 struct list_head *first = list->next;
290 struct list_head *last = list->prev;
291
292 first->prev = prev;
293 prev->next = first;
294
295 last->next = next;
296 next->prev = last;
297 }
298
299 /**
300 * list_splice - join two lists, this is designed for stacks
301 * @list: the new list to add.
302 * @head: the place to add it in the first list.
303 */
304 static inline void list_splice(const struct list_head *list,
305 struct list_head *head)
306 {
307 if (!list_empty(list))
308 __list_splice(list, head, head->next);
309 }
310
311 /**
312 * list_splice_tail - join two lists, each list being a queue
313 * @list: the new list to add.
314 * @head: the place to add it in the first list.
315 */
316 static inline void list_splice_tail(struct list_head *list,
317 struct list_head *head)
318 {
319 if (!list_empty(list))
320 __list_splice(list, head->prev, head);
321 }
322
323 /**
324 * list_splice_init - join two lists and reinitialise the emptied list.
325 * @list: the new list to add.
326 * @head: the place to add it in the first list.
327 *
328 * The list at @list is reinitialised
329 */
330 static inline void list_splice_init(struct list_head *list,
331 struct list_head *head)
332 {
333 if (!list_empty(list)) {
334 __list_splice(list, head, head->next);
335 INIT_LIST_HEAD(list);
336 }
337 }
338
339 /**
340 * list_splice_tail_init - join two lists and reinitialise the emptied list
341 * @list: the new list to add.
342 * @head: the place to add it in the first list.
343 *
344 * Each of the lists is a queue.
345 * The list at @list is reinitialised
346 */
347 static inline void list_splice_tail_init(struct list_head *list,
348 struct list_head *head)
349 {
350 if (!list_empty(list)) {
351 __list_splice(list, head->prev, head);
352 INIT_LIST_HEAD(list);
353 }
354 }
355
356 /**
357 * list_entry - get the struct for this entry
358 * @ptr: the &struct list_head pointer.
359 * @type: the type of the struct this is embedded in.
360 * @member: the name of the list_head within the struct.
361 */
362 #define list_entry(ptr, type, member) \
363 container_of(ptr, type, member)
364
365 /**
366 * list_first_entry - get the first element from a list
367 * @ptr: the list head to take the element from.
368 * @type: the type of the struct this is embedded in.
369 * @member: the name of the list_head within the struct.
370 *
371 * Note that the list is expected to be non-empty.
372 */
373 #define list_first_entry(ptr, type, member) \
374 list_entry((ptr)->next, type, member)
375
376 /**
377 * list_last_entry - get the last element from a list
378 * @ptr: the list head to take the element from.
379 * @type: the type of the struct this is embedded in.
380 * @member: the name of the list_head within the struct.
381 *
382 * Note that the list is expected to be non-empty.
383 */
384 #define list_last_entry(ptr, type, member) \
385 list_entry((ptr)->prev, type, member)
386
387 /**
388 * list_first_entry_or_null - get the first element from a list
389 * @ptr: the list head to take the element from.
390 * @type: the type of the struct this is embedded in.
391 * @member: the name of the list_head within the struct.
392 *
393 * Note that if the list is empty, it returns NULL.
394 */
395 #define list_first_entry_or_null(ptr, type, member) \
396 (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
397
398 /**
399 * list_next_entry - get the next element in list
400 * @pos: the type * to cursor
401 * @member: the name of the list_head within the struct.
402 */
403 #define list_next_entry(pos, member) \
404 list_entry((pos)->member.next, typeof(*(pos)), member)
405
406 /**
407 * list_prev_entry - get the prev element in list
408 * @pos: the type * to cursor
409 * @member: the name of the list_head within the struct.
410 */
411 #define list_prev_entry(pos, member) \
412 list_entry((pos)->member.prev, typeof(*(pos)), member)
413
414 /**
415 * list_for_each - iterate over a list
416 * @pos: the &struct list_head to use as a loop cursor.
417 * @head: the head for your list.
418 */
419 #define list_for_each(pos, head) \
420 for (pos = (head)->next; pos != (head); pos = pos->next)
421
422 /**
423 * list_for_each_prev - iterate over a list backwards
424 * @pos: the &struct list_head to use as a loop cursor.
425 * @head: the head for your list.
426 */
427 #define list_for_each_prev(pos, head) \
428 for (pos = (head)->prev; pos != (head); pos = pos->prev)
429
430 /**
431 * list_for_each_safe - iterate over a list safe against removal of list entry
432 * @pos: the &struct list_head to use as a loop cursor.
433 * @n: another &struct list_head to use as temporary storage
434 * @head: the head for your list.
435 */
436 #define list_for_each_safe(pos, n, head) \
437 for (pos = (head)->next, n = pos->next; pos != (head); \
438 pos = n, n = pos->next)
439
440 /**
441 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
442 * @pos: the &struct list_head to use as a loop cursor.
443 * @n: another &struct list_head to use as temporary storage
444 * @head: the head for your list.
445 */
446 #define list_for_each_prev_safe(pos, n, head) \
447 for (pos = (head)->prev, n = pos->prev; \
448 pos != (head); \
449 pos = n, n = pos->prev)
450
451 /**
452 * list_for_each_entry - iterate over list of given type
453 * @pos: the type * to use as a loop cursor.
454 * @head: the head for your list.
455 * @member: the name of the list_head within the struct.
456 */
457 #define list_for_each_entry(pos, head, member) \
458 for (pos = list_first_entry(head, typeof(*pos), member); \
459 &pos->member != (head); \
460 pos = list_next_entry(pos, member))
461
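/*
 * Illustrative sketch, not taken from this header, reusing the hypothetical
 * struct my_item above: walking all queued entries.
 *
 *	struct my_item *it;
 *
 *	list_for_each_entry(it, &my_items, node)
 *		pr_info("value=%d\n", it->value);
 *
 * If entries may be deleted while iterating, use list_for_each_entry_safe()
 * below, which keeps a lookahead cursor so the current entry can be
 * list_del()'ed safely.
 */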
462 /**
463 * list_for_each_entry_reverse - iterate backwards over list of given type.
464 * @pos: the type * to use as a loop cursor.
465 * @head: the head for your list.
466 * @member: the name of the list_head within the struct.
467 */
468 #define list_for_each_entry_reverse(pos, head, member) \
469 for (pos = list_last_entry(head, typeof(*pos), member); \
470 &pos->member != (head); \
471 pos = list_prev_entry(pos, member))
472
473 /**
474 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
475 * @pos: the type * to use as a start point
476 * @head: the head of the list
477 * @member: the name of the list_head within the struct.
478 *
479 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
480 */
481 #define list_prepare_entry(pos, head, member) \
482 ((pos) ? : list_entry(head, typeof(*pos), member))
483
484 /**
485 * list_for_each_entry_continue - continue iteration over list of given type
486 * @pos: the type * to use as a loop cursor.
487 * @head: the head for your list.
488 * @member: the name of the list_head within the struct.
489 *
490 * Continue to iterate over list of given type, continuing after
491 * the current position.
492 */
493 #define list_for_each_entry_continue(pos, head, member) \
494 for (pos = list_next_entry(pos, member); \
495 &pos->member != (head); \
496 pos = list_next_entry(pos, member))
497
498 /**
499 * list_for_each_entry_continue_reverse - iterate backwards from the given point
500 * @pos: the type * to use as a loop cursor.
501 * @head: the head for your list.
502 * @member: the name of the list_head within the struct.
503 *
504 * Start to iterate over list of given type backwards, continuing after
505 * the current position.
506 */
507 #define list_for_each_entry_continue_reverse(pos, head, member) \
508 for (pos = list_prev_entry(pos, member); \
509 &pos->member != (head); \
510 pos = list_prev_entry(pos, member))
511
512 /**
513 * list_for_each_entry_from - iterate over list of given type from the current point
514 * @pos: the type * to use as a loop cursor.
515 * @head: the head for your list.
516 * @member: the name of the list_head within the struct.
517 *
518 * Iterate over list of given type, continuing from current position.
519 */
520 #define list_for_each_entry_from(pos, head, member) \
521 for (; &pos->member != (head); \
522 pos = list_next_entry(pos, member))
523
524 /**
525 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
526 * @pos: the type * to use as a loop cursor.
527 * @n: another type * to use as temporary storage
528 * @head: the head for your list.
529 * @member: the name of the list_head within the struct.
530 */
531 #define list_for_each_entry_safe(pos, n, head, member) \
532 for (pos = list_first_entry(head, typeof(*pos), member), \
533 n = list_next_entry(pos, member); \
534 &pos->member != (head); \
535 pos = n, n = list_next_entry(n, member))
536
537 /**
538 * list_for_each_entry_safe_continue - continue list iteration safe against removal
539 * @pos: the type * to use as a loop cursor.
540 * @n: another type * to use as temporary storage
541 * @head: the head for your list.
542 * @member: the name of the list_head within the struct.
543 *
544 * Iterate over list of given type, continuing after current point,
545 * safe against removal of list entry.
546 */
547 #define list_for_each_entry_safe_continue(pos, n, head, member) \
548 for (pos = list_next_entry(pos, member), \
549 n = list_next_entry(pos, member); \
550 &pos->member != (head); \
551 pos = n, n = list_next_entry(n, member))
552
553 /**
554 * list_for_each_entry_safe_from - iterate over list from current point safe against removal
555 * @pos: the type * to use as a loop cursor.
556 * @n: another type * to use as temporary storage
557 * @head: the head for your list.
558 * @member: the name of the list_head within the struct.
559 *
560 * Iterate over list of given type from current point, safe against
561 * removal of list entry.
562 */
563 #define list_for_each_entry_safe_from(pos, n, head, member) \
564 for (n = list_next_entry(pos, member); \
565 &pos->member != (head); \
566 pos = n, n = list_next_entry(n, member))
567
568 /**
569 * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
570 * @pos: the type * to use as a loop cursor.
571 * @n: another type * to use as temporary storage
572 * @head: the head for your list.
573 * @member: the name of the list_head within the struct.
574 *
575 * Iterate backwards over list of given type, safe against removal
576 * of list entry.
577 */
578 #define list_for_each_entry_safe_reverse(pos, n, head, member) \
579 for (pos = list_last_entry(head, typeof(*pos), member), \
580 n = list_prev_entry(pos, member); \
581 &pos->member != (head); \
582 pos = n, n = list_prev_entry(n, member))
583
584 /**
585 * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
586 * @pos: the loop cursor used in the list_for_each_entry_safe loop
587 * @n: temporary storage used in list_for_each_entry_safe
588 * @member: the name of the list_head within the struct.
589 *
590 * list_safe_reset_next is not safe to use in general if the list may be
591 * modified concurrently (eg. the lock is dropped in the loop body). An
592 * exception to this is if the cursor element (pos) is pinned in the list,
593 * and list_safe_reset_next is called after re-taking the lock and before
594 * completing the current iteration of the loop body.
595 */
596 #define list_safe_reset_next(pos, n, member) \
597 n = list_next_entry(pos, member)
598
599 /*
600 * Double linked lists with a single pointer list head.
601 * Mostly useful for hash tables where the two pointer list head is
602 * too wasteful.
603 * You lose the ability to access the tail in O(1).
604 */
605
606 #define HLIST_HEAD_INIT { .first = NULL }
607 #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
608 #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
609 static inline void INIT_HLIST_NODE(struct hlist_node *h)
610 {
611 h->next = NULL;
612 h->pprev = NULL;
613 }
614
615 static inline int hlist_unhashed(const struct hlist_node *h)
616 {
617 return !h->pprev;
618 }
619
620 static inline int hlist_empty(const struct hlist_head *h)
621 {
622 return !READ_ONCE(h->first);
623 }
624
625 static inline void __hlist_del(struct hlist_node *n)
626 {
627 struct hlist_node *next = n->next;
628 struct hlist_node **pprev = n->pprev;
629
630 WRITE_ONCE(*pprev, next);
631 if (next)
632 next->pprev = pprev;
633 }
634
635 static inline void hlist_del(struct hlist_node *n)
636 {
637 __hlist_del(n);
638 n->next = LIST_POISON1;
639 n->pprev = LIST_POISON2;
640 }
641
642 static inline void hlist_del_init(struct hlist_node *n)
643 {
644 if (!hlist_unhashed(n)) {
645 __hlist_del(n);
646 INIT_HLIST_NODE(n);
647 }
648 }
649
650 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
651 {
652 struct hlist_node *first = h->first;
653 n->next = first;
654 if (first)
655 first->pprev = &n->next;
656 WRITE_ONCE(h->first, n);
657 n->pprev = &h->first;
658 }
659
660 /* next must be != NULL */
661 static inline void hlist_add_before(struct hlist_node *n,
662 struct hlist_node *next)
663 {
664 n->pprev = next->pprev;
665 n->next = next;
666 next->pprev = &n->next;
667 WRITE_ONCE(*(n->pprev), n);
668 }
669
670 static inline void hlist_add_behind(struct hlist_node *n,
671 struct hlist_node *prev)
672 {
673 n->next = prev->next;
674 WRITE_ONCE(prev->next, n);
675 n->pprev = &prev->next;
676
677 if (n->next)
678 n->next->pprev = &n->next;
679 }
680
681 /* after that we'll appear to be on some hlist and hlist_del will work */
682 static inline void hlist_add_fake(struct hlist_node *n)
683 {
684 n->pprev = &n->next;
685 }
686
687 static inline bool hlist_fake(struct hlist_node *h)
688 {
689 return h->pprev == &h->next;
690 }
691
692 /*
693 * Move a list from one list head to another. Fixup the pprev
694 * reference of the first entry if it exists.
695 */
696 static inline void hlist_move_list(struct hlist_head *old,
697 struct hlist_head *new)
698 {
699 new->first = old->first;
700 if (new->first)
701 new->first->pprev = &new->first;
702 old->first = NULL;
703 }
704
705 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
706
707 #define hlist_for_each(pos, head) \
708 for (pos = (head)->first; pos ; pos = pos->next)
709
710 #define hlist_for_each_safe(pos, n, head) \
711 for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
712 pos = n)
713
714 #define hlist_entry_safe(ptr, type, member) \
715 ({ typeof(ptr) ____ptr = (ptr); \
716 ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
717 })
718
719 /**
720 * hlist_for_each_entry - iterate over list of given type
721 * @pos: the type * to use as a loop cursor.
722 * @head: the head for your list.
723 * @member: the name of the hlist_node within the struct.
724 */
725 #define hlist_for_each_entry(pos, head, member) \
726 for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
727 pos; \
728 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
729
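/*
 * Illustrative sketch, not taken from this header: a small hash table built
 * on hlist heads. All names below are hypothetical.
 *
 *	#define MY_HASH_BITS	4
 *	static struct hlist_head my_table[1 << MY_HASH_BITS];
 *
 *	struct my_obj {
 *		unsigned long key;
 *		struct hlist_node hash;
 *	};
 *
 *	static void my_obj_insert(struct my_obj *obj)
 *	{
 *		unsigned long bucket = obj->key & ((1 << MY_HASH_BITS) - 1);
 *
 *		hlist_add_head(&obj->hash, &my_table[bucket]);
 *	}
 *
 * Lookup then walks a single bucket with hlist_for_each_entry() and
 * compares keys.
 */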
730 /**
731 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
732 * @pos: the type * to use as a loop cursor.
733 * @member: the name of the hlist_node within the struct.
734 */
735 #define hlist_for_each_entry_continue(pos, member) \
736 for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
737 pos; \
738 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
739
740 /**
741 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
742 * @pos: the type * to use as a loop cursor.
743 * @member: the name of the hlist_node within the struct.
744 */
745 #define hlist_for_each_entry_from(pos, member) \
746 for (; pos; \
747 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
748
749 /**
750 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
751 * @pos: the type * to use as a loop cursor.
752 * @n: another &struct hlist_node to use as temporary storage
753 * @head: the head for your list.
754 * @member: the name of the hlist_node within the struct.
755 */
756 #define hlist_for_each_entry_safe(pos, n, head, member) \
757 for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
758 pos && ({ n = pos->member.next; 1; }); \
759 pos = hlist_entry_safe(n, typeof(*pos), member))
760
761 #endif
1 /*
2 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
3 *
4 * (C) SGI 2006, Christoph Lameter
5 * Cleaned up and restructured to ease the addition of alternative
6 * implementations of SLAB allocators.
7 * (C) Linux Foundation 2008-2013
8 * Unified interface for all slab allocators
9 */
10
11 #ifndef _LINUX_SLAB_H
12 #define _LINUX_SLAB_H
13
14 #include <linux/gfp.h>
15 #include <linux/types.h>
16 #include <linux/workqueue.h>
17
18
19 /*
20 * Flags to pass to kmem_cache_create().
21 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
22 */
23 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
24 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
25 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
26 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
27 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
28 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
29 #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
30 /*
31 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
32 *
33 * This delays freeing the SLAB page by a grace period; it does _NOT_
34 * delay object freeing. This means that if you do kmem_cache_free()
35 * that memory location is free to be reused at any time. Thus it may
36 * be possible to see another object there in the same RCU grace period.
37 *
38 * This feature only ensures the memory location backing the object
39 * stays valid, the trick to using this is relying on an independent
40 * object validation pass. Something like:
41 *
42 * rcu_read_lock()
43 * again:
44 * obj = lockless_lookup(key);
45 * if (obj) {
46 * if (!try_get_ref(obj)) // might fail for free objects
47 * goto again;
48 *
49 * if (obj->key != key) { // not the object we expected
50 * put_ref(obj);
51 * goto again;
52 * }
53 * }
54 * rcu_read_unlock();
55 *
56 * This is useful if we need to approach a kernel structure obliquely,
57 * from its address obtained without the usual locking. We can lock
58 * the structure to stabilize it and check it's still at the given address,
59 * only if we can be sure that the memory has not been meanwhile reused
60 * for some other kind of object (which our subsystem's lock might corrupt).
61 *
62 * rcu_read_lock before reading the address, then rcu_read_unlock after
63 * taking the spinlock within the structure expected at that address.
64 */
65 #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
66 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
67 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
68
69 /* Flag to prevent checks on free */
70 #ifdef CONFIG_DEBUG_OBJECTS
71 # define SLAB_DEBUG_OBJECTS 0x00400000UL
72 #else
73 # define SLAB_DEBUG_OBJECTS 0x00000000UL
74 #endif
75
76 #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
77
78 /* Don't track use of uninitialized memory */
79 #ifdef CONFIG_KMEMCHECK
80 # define SLAB_NOTRACK 0x01000000UL
81 #else
82 # define SLAB_NOTRACK 0x00000000UL
83 #endif
84 #ifdef CONFIG_FAILSLAB
85 # define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */
86 #else
87 # define SLAB_FAILSLAB 0x00000000UL
88 #endif
89 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
90 # define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */
91 #else
92 # define SLAB_ACCOUNT 0x00000000UL
93 #endif
94
95 /* The following flags affect the page allocator grouping pages by mobility */
96 #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
97 #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
98 /*
99 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
100 *
101 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
102 *
103 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
104 * Both make kfree a no-op.
105 */
106 #define ZERO_SIZE_PTR ((void *)16)
107
108 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
109 (unsigned long)ZERO_SIZE_PTR)
110
111 #include <linux/kmemleak.h>
112 #include <linux/kasan.h>
113
114 struct mem_cgroup;
115 /*
116 * struct kmem_cache related prototypes
117 */
118 void __init kmem_cache_init(void);
119 bool slab_is_available(void);
120
121 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
122 unsigned long,
123 void (*)(void *));
124 void kmem_cache_destroy(struct kmem_cache *);
125 int kmem_cache_shrink(struct kmem_cache *);
126
127 void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
128 void memcg_deactivate_kmem_caches(struct mem_cgroup *);
129 void memcg_destroy_kmem_caches(struct mem_cgroup *);
130
131 /*
132 * Please use this macro to create slab caches. Simply specify the
133 * name of the structure and maybe some flags that are listed above.
134 *
135 * The alignment of the struct determines object alignment. If you
136 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
137 * then the objects will be properly aligned in SMP configurations.
138 */
139 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
140 sizeof(struct __struct), __alignof__(struct __struct),\
141 (__flags), NULL)
142
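/*
 * Illustrative sketch, not taken from this header: creating a dedicated
 * cache for a hypothetical struct my_obj and allocating from it.
 *
 *	static struct kmem_cache *my_obj_cache;
 *
 *	my_obj_cache = KMEM_CACHE(my_obj, SLAB_HWCACHE_ALIGN);
 *	if (!my_obj_cache)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_zalloc(my_obj_cache, GFP_KERNEL);
 *
 * Objects go back to the cache with kmem_cache_free(), and the cache itself
 * is torn down with kmem_cache_destroy() on module unload.
 */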
143 /*
144 * Common kmalloc functions provided by all allocators
145 */
146 void * __must_check __krealloc(const void *, size_t, gfp_t);
147 void * __must_check krealloc(const void *, size_t, gfp_t);
148 void kfree(const void *);
149 void kzfree(const void *);
150 size_t ksize(const void *);
151
152 /*
153 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
154 * alignment larger than the alignment of a 64-bit integer.
155 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
156 */
157 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
158 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
159 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
160 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
161 #else
162 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
163 #endif
164
165 /*
166 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
167 * Intended for arches that get misalignment faults even for 64 bit integer
168 * aligned buffers.
169 */
170 #ifndef ARCH_SLAB_MINALIGN
171 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
172 #endif
173
174 /*
175 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
176 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
177 * aligned pointers.
178 */
179 #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
180 #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
181 #define __assume_page_alignment __assume_aligned(PAGE_SIZE)
182
183 /*
184 * Kmalloc array related definitions
185 */
186
187 #ifdef CONFIG_SLAB
188 /*
189 * The largest kmalloc size supported by the SLAB allocators is
190 * 32 megabytes (2^25) or the maximum allocatable page order if that is
191 * less than 32 MB.
192 *
193 * WARNING: It's not easy to increase this value since the allocators have
194 * to do various tricks to work around compiler limitations in order to
195 * ensure proper constant folding.
196 */
197 #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
198 (MAX_ORDER + PAGE_SHIFT - 1) : 25)
199 #define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
200 #ifndef KMALLOC_SHIFT_LOW
201 #define KMALLOC_SHIFT_LOW 5
202 #endif
203 #endif
204
205 #ifdef CONFIG_SLUB
206 /*
207 * SLUB directly allocates requests fitting into an order-1 page
208 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
209 */
210 #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
211 #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
212 #ifndef KMALLOC_SHIFT_LOW
213 #define KMALLOC_SHIFT_LOW 3
214 #endif
215 #endif
216
217 #ifdef CONFIG_SLOB
218 /*
219 * SLOB passes all requests larger than one page to the page allocator.
220 * No kmalloc array is necessary since objects of different sizes can
221 * be allocated from the same page.
222 */
223 #define KMALLOC_SHIFT_HIGH PAGE_SHIFT
224 #define KMALLOC_SHIFT_MAX 30
225 #ifndef KMALLOC_SHIFT_LOW
226 #define KMALLOC_SHIFT_LOW 3
227 #endif
228 #endif
229
230 /* Maximum allocatable size */
231 #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
232 /* Maximum size for which we actually use a slab cache */
233 #define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
234 /* Maximum order allocatable via the slab allocator */
235 #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
236
237 /*
238 * Kmalloc subsystem.
239 */
240 #ifndef KMALLOC_MIN_SIZE
241 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
242 #endif
243
244 /*
245 * This restriction comes from byte sized index implementation.
246 * Page size is normally 2^12 bytes and, in this case, if we want to use
247 * byte sized index which can represent 2^8 entries, the size of the object
248 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
249 * If the minimum kmalloc size is less than 16, we use it as the minimum
250 * object size and give up on using the byte sized index.
251 */
252 #define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
253 (KMALLOC_MIN_SIZE) : 16)
254
255 #ifndef CONFIG_SLOB
256 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
257 #ifdef CONFIG_ZONE_DMA
258 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
259 #endif
260
261 /*
262 * Figure out which kmalloc slab an allocation of a certain size
263 * belongs to.
264 * 0 = zero alloc
265 * 1 = 65 .. 96 bytes
266 * 2 = 129 .. 192 bytes
267 * n = 2^(n-1)+1 .. 2^n
268 */
269 static __always_inline int kmalloc_index(size_t size)
270 {
271 if (!size)
272 return 0;
273
274 if (size <= KMALLOC_MIN_SIZE)
275 return KMALLOC_SHIFT_LOW;
276
277 if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
278 return 1;
279 if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
280 return 2;
281 if (size <= 8) return 3;
282 if (size <= 16) return 4;
283 if (size <= 32) return 5;
284 if (size <= 64) return 6;
285 if (size <= 128) return 7;
286 if (size <= 256) return 8;
287 if (size <= 512) return 9;
288 if (size <= 1024) return 10;
289 if (size <= 2 * 1024) return 11;
290 if (size <= 4 * 1024) return 12;
291 if (size <= 8 * 1024) return 13;
292 if (size <= 16 * 1024) return 14;
293 if (size <= 32 * 1024) return 15;
294 if (size <= 64 * 1024) return 16;
295 if (size <= 128 * 1024) return 17;
296 if (size <= 256 * 1024) return 18;
297 if (size <= 512 * 1024) return 19;
298 if (size <= 1024 * 1024) return 20;
299 if (size <= 2 * 1024 * 1024) return 21;
300 if (size <= 4 * 1024 * 1024) return 22;
301 if (size <= 8 * 1024 * 1024) return 23;
302 if (size <= 16 * 1024 * 1024) return 24;
303 if (size <= 32 * 1024 * 1024) return 25;
304 if (size <= 64 * 1024 * 1024) return 26;
305 BUG();
306
307 /* Will never be reached. Needed because the compiler may complain */
308 return -1;
309 }
310 #endif /* !CONFIG_SLOB */
311
312 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
313 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
314 void kmem_cache_free(struct kmem_cache *, void *);
315
316 /*
317 * Bulk allocation and freeing operations. These are accelerated in an
318 * allocator specific way to avoid taking locks repeatedly or building
319 * metadata structures unnecessarily.
320 *
321 * Note that interrupts must be enabled when calling these functions.
322 */
323 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
324 int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
325
326 #ifdef CONFIG_NUMA
327 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
328 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
329 #else
330 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
331 {
332 return __kmalloc(size, flags);
333 }
334
335 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
336 {
337 return kmem_cache_alloc(s, flags);
338 }
339 #endif
340
341 #ifdef CONFIG_TRACING
342 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
343
344 #ifdef CONFIG_NUMA
345 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
346 gfp_t gfpflags,
347 int node, size_t size) __assume_slab_alignment;
348 #else
349 static __always_inline void *
350 kmem_cache_alloc_node_trace(struct kmem_cache *s,
351 gfp_t gfpflags,
352 int node, size_t size)
353 {
354 return kmem_cache_alloc_trace(s, gfpflags, size);
355 }
356 #endif /* CONFIG_NUMA */
357
358 #else /* CONFIG_TRACING */
359 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
360 gfp_t flags, size_t size)
361 {
362 void *ret = kmem_cache_alloc(s, flags);
363
364 kasan_kmalloc(s, ret, size);
365 return ret;
366 }
367
368 static __always_inline void *
369 kmem_cache_alloc_node_trace(struct kmem_cache *s,
370 gfp_t gfpflags,
371 int node, size_t size)
372 {
373 void *ret = kmem_cache_alloc_node(s, gfpflags, node);
374
375 kasan_kmalloc(s, ret, size);
376 return ret;
377 }
378 #endif /* CONFIG_TRACING */
379
380 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
381
382 #ifdef CONFIG_TRACING
383 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
384 #else
385 static __always_inline void *
386 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
387 {
388 return kmalloc_order(size, flags, order);
389 }
390 #endif
391
392 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
393 {
394 unsigned int order = get_order(size);
395 return kmalloc_order_trace(size, flags, order);
396 }
397
398 /**
399 * kmalloc - allocate memory
400 * @size: how many bytes of memory are required.
401 * @flags: the type of memory to allocate.
402 *
403 * kmalloc is the normal method of allocating memory
404 * for objects smaller than page size in the kernel.
405 *
406 * The @flags argument may be one of:
407 *
408 * %GFP_USER - Allocate memory on behalf of user. May sleep.
409 *
410 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
411 *
412 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
413 * For example, use this inside interrupt handlers.
414 *
415 * %GFP_HIGHUSER - Allocate pages from high memory.
416 *
417 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
418 *
419 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
420 *
421 * %GFP_NOWAIT - Allocation will not sleep.
422 *
423 * %__GFP_THISNODE - Allocate node-local memory only.
424 *
425 * %GFP_DMA - Allocation suitable for DMA.
426 * Should only be used for kmalloc() caches. Otherwise, use a
427 * slab created with SLAB_DMA.
428 *
429 * Also it is possible to set different flags by OR'ing
430 * in one or more of the following additional @flags:
431 *
432 * %__GFP_COLD - Request cache-cold pages instead of
433 * trying to return cache-warm pages.
434 *
435 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
436 *
437 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
438 * (think twice before using).
439 *
440 * %__GFP_NORETRY - If memory is not immediately available,
441 * then give up at once.
442 *
443 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
444 *
445 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
446 *
447 * There are other flags available as well, but these are not intended
448 * for general use, and so are not documented here. For a full list of
449 * potential flags, always refer to linux/gfp.h.
450 */
451 static __always_inline void *kmalloc(size_t size, gfp_t flags)
452 {
453 if (__builtin_constant_p(size)) {
454 if (size > KMALLOC_MAX_CACHE_SIZE)
455 return kmalloc_large(size, flags);
456 #ifndef CONFIG_SLOB
457 if (!(flags & GFP_DMA)) {
458 int index = kmalloc_index(size);
459
460 if (!index)
461 return ZERO_SIZE_PTR;
462
463 return kmem_cache_alloc_trace(kmalloc_caches[index],
464 flags, size);
465 }
466 #endif
467 }
468 return __kmalloc(size, flags);
469 }
470
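/*
 * Illustrative sketch, not taken from this header: a typical kmalloc()/
 * kfree() pair in sleepable context. "struct my_msg" is hypothetical.
 *
 *	struct my_msg *msg = kmalloc(sizeof(*msg), GFP_KERNEL);
 *
 *	if (!msg)
 *		return -ENOMEM;
 *	...
 *	kfree(msg);
 *
 * In atomic context (e.g. inside an interrupt handler) GFP_ATOMIC must be
 * used instead, since GFP_KERNEL allocations may sleep.
 */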
471 /*
472 * Determine the size used for the nth kmalloc cache.
473 * Returns the size, or 0 if a kmalloc cache for that
474 * size does not exist.
475 */
476 static __always_inline int kmalloc_size(int n)
477 {
478 #ifndef CONFIG_SLOB
479 if (n > 2)
480 return 1 << n;
481
482 if (n == 1 && KMALLOC_MIN_SIZE <= 32)
483 return 96;
484
485 if (n == 2 && KMALLOC_MIN_SIZE <= 64)
486 return 192;
487 #endif
488 return 0;
489 }
490
491 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
492 {
493 #ifndef CONFIG_SLOB
494 if (__builtin_constant_p(size) &&
495 size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
496 int i = kmalloc_index(size);
497
498 if (!i)
499 return ZERO_SIZE_PTR;
500
501 return kmem_cache_alloc_node_trace(kmalloc_caches[i],
502 flags, node, size);
503 }
504 #endif
505 return __kmalloc_node(size, flags, node);
506 }
507
508 struct memcg_cache_array {
509 struct rcu_head rcu;
510 struct kmem_cache *entries[0];
511 };
512
513 /*
514 * This is the main placeholder for memcg-related information in kmem caches.
515 * Both the root cache and the child caches will have it. For the root cache,
516 * this will hold a dynamically allocated array large enough to hold
517 * information about the currently limited memcgs in the system. To allow the
518 * array to be accessed without taking any locks, on relocation we free the old
519 * version only after a grace period.
520 *
521 * Child caches will hold extra metadata needed for their operation. Fields are:
522 *
523 * @memcg: pointer to the memcg this cache belongs to
524 * @root_cache: pointer to the global, root cache, this cache was derived from
525 *
526 * Both root and child caches of the same kind are linked into a list chained
527 * through @list.
528 */
529 struct memcg_cache_params {
530 bool is_root_cache;
531 struct list_head list;
532 union {
533 struct memcg_cache_array __rcu *memcg_caches;
534 struct {
535 struct mem_cgroup *memcg;
536 struct kmem_cache *root_cache;
537 };
538 };
539 };
540
541 int memcg_update_all_caches(int num_memcgs);
542
543 /**
544 * kmalloc_array - allocate memory for an array.
545 * @n: number of elements.
546 * @size: element size.
547 * @flags: the type of memory to allocate (see kmalloc).
548 */
549 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
550 {
551 if (size != 0 && n > SIZE_MAX / size)
552 return NULL;
553 return __kmalloc(n * size, flags);
554 }
555
556 /**
557 * kcalloc - allocate memory for an array. The memory is set to zero.
558 * @n: number of elements.
559 * @size: element size.
560 * @flags: the type of memory to allocate (see kmalloc).
561 */
562 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
563 {
564 return kmalloc_array(n, size, flags | __GFP_ZERO);
565 }
566
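/*
 * Illustrative sketch, not taken from this header: sizing a table from a
 * possibly large element count. "nr_entries" is hypothetical.
 *
 *	u32 *tbl = kcalloc(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *
 * A plain kmalloc(nr_entries * sizeof(*tbl), ...) could wrap silently on
 * multiplication overflow and hand back a too-small buffer; kmalloc_array()
 * and kcalloc() return NULL in that case instead.
 */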
567 /*
568 * kmalloc_track_caller is a special version of kmalloc that records the
569 * calling function of the routine calling it for slab leak tracking instead
570 * of just the calling function (confusing, eh?).
571 * It's useful when the call to kmalloc comes from a widely-used standard
572 * allocator where we care about the real place the memory allocation
573 * request comes from.
574 */
575 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
576 #define kmalloc_track_caller(size, flags) \
577 __kmalloc_track_caller(size, flags, _RET_IP_)
578
579 #ifdef CONFIG_NUMA
580 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
581 #define kmalloc_node_track_caller(size, flags, node) \
582 __kmalloc_node_track_caller(size, flags, node, \
583 _RET_IP_)
584
585 #else /* CONFIG_NUMA */
586
587 #define kmalloc_node_track_caller(size, flags, node) \
588 kmalloc_track_caller(size, flags)
589
590 #endif /* CONFIG_NUMA */
591
592 /*
593 * Shortcuts
594 */
595 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
596 {
597 return kmem_cache_alloc(k, flags | __GFP_ZERO);
598 }
599
600 /**
601 * kzalloc - allocate memory. The memory is set to zero.
602 * @size: how many bytes of memory are required.
603 * @flags: the type of memory to allocate (see kmalloc).
604 */
605 static inline void *kzalloc(size_t size, gfp_t flags)
606 {
607 return kmalloc(size, flags | __GFP_ZERO);
608 }
609
610 /**
611 * kzalloc_node - allocate zeroed memory from a particular memory node.
612 * @size: how many bytes of memory are required.
613 * @flags: the type of memory to allocate (see kmalloc).
614 * @node: memory node from which to allocate
615 */
616 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
617 {
618 return kmalloc_node(size, flags | __GFP_ZERO, node);
619 }
620
621 unsigned int kmem_cache_size(struct kmem_cache *s);
622 void __init kmem_cache_init_late(void);
623
624 #endif /* _LINUX_SLAB_H */
1 #ifndef __LINUX_SPINLOCK_H
2 #define __LINUX_SPINLOCK_H
3
4 /*
5 * include/linux/spinlock.h - generic spinlock/rwlock declarations
6 *
7 * here's the role of the various spinlock/rwlock related include files:
8 *
9 * on SMP builds:
10 *
11 * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
12 * initializers
13 *
14 * linux/spinlock_types.h:
15 * defines the generic type and initializers
16 *
17 * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
18 * implementations, mostly inline assembly code
19 *
20 * (also included on UP-debug builds:)
21 *
22 * linux/spinlock_api_smp.h:
23 * contains the prototypes for the _spin_*() APIs.
24 *
25 * linux/spinlock.h: builds the final spin_*() APIs.
26 *
27 * on UP builds:
28 *
29 * linux/spinlock_type_up.h:
30 * contains the generic, simplified UP spinlock type.
31 * (which is an empty structure on non-debug builds)
32 *
33 * linux/spinlock_types.h:
34 * defines the generic type and initializers
35 *
36 * linux/spinlock_up.h:
37 * contains the arch_spin_*()/etc. version of UP
38 * builds. (which are NOPs on non-debug, non-preempt
39 * builds)
40 *
41 * (included on UP-non-debug builds:)
42 *
43 * linux/spinlock_api_up.h:
44 * builds the _spin_*() APIs.
45 *
46 * linux/spinlock.h: builds the final spin_*() APIs.
47 */
48
49 #include <linux/typecheck.h>
50 #include <linux/preempt.h>
51 #include <linux/linkage.h>
52 #include <linux/compiler.h>
53 #include <linux/irqflags.h>
54 #include <linux/thread_info.h>
55 #include <linux/kernel.h>
56 #include <linux/stringify.h>
57 #include <linux/bottom_half.h>
58 #include <asm/barrier.h>
59
60
61 /*
62 * Must define these before including other files, inline functions need them
63 */
64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
65
66 #define LOCK_SECTION_START(extra) \
67 ".subsection 1\n\t" \
68 extra \
69 ".ifndef " LOCK_SECTION_NAME "\n\t" \
70 LOCK_SECTION_NAME ":\n\t" \
71 ".endif\n"
72
73 #define LOCK_SECTION_END \
74 ".previous\n\t"
75
76 #define __lockfunc __attribute__((section(".spinlock.text")))
77
78 /*
79 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
80 */
81 #include <linux/spinlock_types.h>
82
83 /*
84 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
85 */
86 #ifdef CONFIG_SMP
87 # include <asm/spinlock.h>
88 #else
89 # include <linux/spinlock_up.h>
90 #endif
91
92 #ifdef CONFIG_DEBUG_SPINLOCK
93 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
94 struct lock_class_key *key);
95 # define raw_spin_lock_init(lock) \
96 do { \
97 static struct lock_class_key __key; \
98 \
99 __raw_spin_lock_init((lock), #lock, &__key); \
100 } while (0)
101
102 #else
103 # define raw_spin_lock_init(lock) \
104 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
105 #endif
106
107 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
108
109 #ifdef CONFIG_GENERIC_LOCKBREAK
110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
111 #else
112
113 #ifdef arch_spin_is_contended
114 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
115 #else
116 #define raw_spin_is_contended(lock) (((void)(lock), 0))
117 #endif /*arch_spin_is_contended*/
118 #endif
119
120 /*
121 * Despite its name it doesn't necessarily have to be a full barrier.
122 * It should only guarantee that a STORE before the critical section
123 * can not be reordered with LOADs and STOREs inside this section.
124 * spin_lock() is the one-way barrier, this LOAD can not escape out
125 * of the region. So the default implementation simply ensures that
126 * a STORE can not move into the critical section, smp_wmb() should
127 * serialize it with another STORE done by spin_lock().
128 */
129 #ifndef smp_mb__before_spinlock
130 #define smp_mb__before_spinlock() smp_wmb()
131 #endif
132
133 /**
134 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
135 * @lock: the spinlock in question.
136 */
137 #define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
138
139 #ifdef CONFIG_DEBUG_SPINLOCK
140 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
141 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
142 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
143 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
144 #else
145 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
146 {
147 __acquire(lock);
148 arch_spin_lock(&lock->raw_lock);
149 }
150
151 static inline void
152 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
153 {
154 __acquire(lock);
155 arch_spin_lock_flags(&lock->raw_lock, *flags);
156 }
157
158 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
159 {
160 return arch_spin_trylock(&(lock)->raw_lock);
161 }
162
163 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
164 {
165 arch_spin_unlock(&lock->raw_lock);
166 __release(lock);
167 }
168 #endif
169
170 /*
171 * Define the various spin_lock methods. Note we define these
172 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
173 * various methods are defined as nops in the case they are not
174 * required.
175 */
176 #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
177
178 #define raw_spin_lock(lock) _raw_spin_lock(lock)
179
180 #ifdef CONFIG_DEBUG_LOCK_ALLOC
181 # define raw_spin_lock_nested(lock, subclass) \
182 _raw_spin_lock_nested(lock, subclass)
183 # define raw_spin_lock_bh_nested(lock, subclass) \
184 _raw_spin_lock_bh_nested(lock, subclass)
185
186 # define raw_spin_lock_nest_lock(lock, nest_lock) \
187 do { \
188 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
189 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
190 } while (0)
191 #else
192 /*
193 * Always evaluate the 'subclass' argument to avoid that the compiler
194 * warns about set-but-not-used variables when building with
195 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
196 */
197 # define raw_spin_lock_nested(lock, subclass) \
198 _raw_spin_lock(((void)(subclass), (lock)))
199 # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
200 # define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
201 #endif
202
203 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
204
205 #define raw_spin_lock_irqsave(lock, flags) \
206 do { \
207 typecheck(unsigned long, flags); \
208 flags = _raw_spin_lock_irqsave(lock); \
209 } while (0)
210
211 #ifdef CONFIG_DEBUG_LOCK_ALLOC
212 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
213 do { \
214 typecheck(unsigned long, flags); \
215 flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
216 } while (0)
217 #else
218 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
219 do { \
220 typecheck(unsigned long, flags); \
221 flags = _raw_spin_lock_irqsave(lock); \
222 } while (0)
223 #endif
224
225 #else
226
227 #define raw_spin_lock_irqsave(lock, flags) \
228 do { \
229 typecheck(unsigned long, flags); \
230 _raw_spin_lock_irqsave(lock, flags); \
231 } while (0)
232
233 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
234 raw_spin_lock_irqsave(lock, flags)
235
236 #endif
237
238 #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
239 #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
240 #define raw_spin_unlock(lock) _raw_spin_unlock(lock)
241 #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
242
243 #define raw_spin_unlock_irqrestore(lock, flags) \
244 do { \
245 typecheck(unsigned long, flags); \
246 _raw_spin_unlock_irqrestore(lock, flags); \
247 } while (0)
248 #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
249
250 #define raw_spin_trylock_bh(lock) \
251 __cond_lock(lock, _raw_spin_trylock_bh(lock))
252
253 #define raw_spin_trylock_irq(lock) \
254 ({ \
255 local_irq_disable(); \
256 raw_spin_trylock(lock) ? \
257 1 : ({ local_irq_enable(); 0; }); \
258 })
259
260 #define raw_spin_trylock_irqsave(lock, flags) \
261 ({ \
262 local_irq_save(flags); \
263 raw_spin_trylock(lock) ? \
264 1 : ({ local_irq_restore(flags); 0; }); \
265 })
266
267 /**
268 * raw_spin_can_lock - would raw_spin_trylock() succeed?
269 * @lock: the spinlock in question.
270 */
271 #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
272
273 /* Include rwlock functions */
274 #include <linux/rwlock.h>
275
276 /*
277 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
278 */
279 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
280 # include <linux/spinlock_api_smp.h>
281 #else
282 # include <linux/spinlock_api_up.h>
283 #endif
284
285 /*
286 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
287 */
288
289 static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
290 {
291 return &lock->rlock;
292 }
293
294 #define spin_lock_init(_lock) \
295 do { \
296 spinlock_check(_lock); \
297 raw_spin_lock_init(&(_lock)->rlock); \
298 } while (0)
299
300 static __always_inline void spin_lock(spinlock_t *lock)
301 {
302 raw_spin_lock(&lock->rlock);
303 }
304
305 static __always_inline void spin_lock_bh(spinlock_t *lock)
306 {
307 raw_spin_lock_bh(&lock->rlock);
308 }
309
310 static __always_inline int spin_trylock(spinlock_t *lock)
311 {
312 return raw_spin_trylock(&lock->rlock);
313 }
314
315 #define spin_lock_nested(lock, subclass) \
316 do { \
317 raw_spin_lock_nested(spinlock_check(lock), subclass); \
318 } while (0)
319
320 #define spin_lock_bh_nested(lock, subclass) \
321 do { \
322 raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
323 } while (0)
324
325 #define spin_lock_nest_lock(lock, nest_lock) \
326 do { \
327 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
328 } while (0)
329
330 static __always_inline void spin_lock_irq(spinlock_t *lock)
331 {
332 raw_spin_lock_irq(&lock->rlock);
333 }
334
335 #define spin_lock_irqsave(lock, flags) \
336 do { \
337 raw_spin_lock_irqsave(spinlock_check(lock), flags); \
338 } while (0)
339
340 #define spin_lock_irqsave_nested(lock, flags, subclass) \
341 do { \
342 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
343 } while (0)
344
345 static __always_inline void spin_unlock(spinlock_t *lock)
346 {
347 raw_spin_unlock(&lock->rlock);
348 }
349
350 static __always_inline void spin_unlock_bh(spinlock_t *lock)
351 {
352 raw_spin_unlock_bh(&lock->rlock);
353 }
354
355 static __always_inline void spin_unlock_irq(spinlock_t *lock)
356 {
357 raw_spin_unlock_irq(&lock->rlock);
358 }
359
360 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
361 {
362 raw_spin_unlock_irqrestore(&lock->rlock, flags);
363 }
364
365 static __always_inline int spin_trylock_bh(spinlock_t *lock)
366 {
367 return raw_spin_trylock_bh(&lock->rlock);
368 }
369
370 static __always_inline int spin_trylock_irq(spinlock_t *lock)
371 {
372 return raw_spin_trylock_irq(&lock->rlock);
373 }
374
375 #define spin_trylock_irqsave(lock, flags) \
376 ({ \
377 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
378 })
379
380 static __always_inline void spin_unlock_wait(spinlock_t *lock)
381 {
382 raw_spin_unlock_wait(&lock->rlock);
383 }
384
385 static __always_inline int spin_is_locked(spinlock_t *lock)
386 {
387 return raw_spin_is_locked(&lock->rlock);
388 }
389
390 static __always_inline int spin_is_contended(spinlock_t *lock)
391 {
392 return raw_spin_is_contended(&lock->rlock);
393 }
394
395 static __always_inline int spin_can_lock(spinlock_t *lock)
396 {
397 return raw_spin_can_lock(&lock->rlock);
398 }
399
400 #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
401
402 /*
403 * Pull the atomic_t declaration:
404 * (asm-mips/atomic.h needs above definitions)
405 */
406 #include <linux/atomic.h>
407 /**
408 * atomic_dec_and_lock - lock on reaching reference count zero
409 * @atomic: the atomic counter
410 * @lock: the spinlock in question
411 *
412 * Decrements @atomic by 1. If the result is 0, returns true and locks
413 * @lock. Returns false for all other cases.
414 */
415 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
416 #define atomic_dec_and_lock(atomic, lock) \
417 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
418
419 #endif /* __LINUX_SPINLOCK_H */
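For reference, below is a minimal, illustrative sketch of how the API defined in the header above is typically used in a driver: spin_lock_irqsave()/spin_unlock_irqrestore() paired around a short critical section, and atomic_dec_and_lock() on a reference-count release path. This sketch is not part of linux/spinlock.h and is not taken from the driver under analysis; struct my_dev, its fields, my_dev_list and the helper functions are hypothetical.

/*
 * Illustrative sketch only: typical pairing of the primitives declared
 * in linux/spinlock.h above.  All names below are hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>

struct my_dev {
	spinlock_t lock;		/* protects "count" */
	unsigned int count;
	atomic_t refcnt;
	struct list_head node;
};

static DEFINE_SPINLOCK(my_dev_list_lock);	/* protects my_dev_list */
static LIST_HEAD(my_dev_list);

static void my_dev_inc(struct my_dev *dev)
{
	unsigned long flags;

	/*
	 * Every spin_lock_irqsave() must be matched by
	 * spin_unlock_irqrestore() on every path out of the
	 * critical section.
	 */
	spin_lock_irqsave(&dev->lock, flags);
	dev->count++;
	spin_unlock_irqrestore(&dev->lock, flags);
}

static void my_dev_put(struct my_dev *dev)
{
	/*
	 * atomic_dec_and_lock(): decrement refcnt and, only if it
	 * reached zero, take my_dev_list_lock so the object can be
	 * unlinked and freed; the lock must then be released here.
	 */
	if (atomic_dec_and_lock(&dev->refcnt, &my_dev_list_lock)) {
		list_del(&dev->node);
		spin_unlock(&my_dev_list_lock);
		kfree(dev);
	}
}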
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there is an actual error in your driver.
The Error trace column shows the path along which the given rule is violated. You can expand or collapse whole classes of entities by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand or collapse each particular entity by clicking +/-. Hovering over some entities shows tooltips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking them opens the corresponding lines in the source code.
The Source code column contains the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows its full file name, and clicking it displays that file's contents.
Kernel | Module | Rule | Verifier | Verdict | Status | Creation time | Problem description |
linux-4.5-rc1.tar.xz | drivers/usb/host/whci/whci-hcd.ko | 331_1a | CPAchecker | Bug | Fixed | 2016-03-25 23:36:30 | L0224 |
Comment
Reported: 25 Mar 2016
[Back to top]