Bug


Error # 95

Error trace
__CPAchecker_initialize()
{
19 typedef signed char __s8;
20 typedef unsigned char __u8;
22 typedef short __s16;
23 typedef unsigned short __u16;
25 typedef int __s32;
26 typedef unsigned int __u32;
30 typedef unsigned long long __u64;
15 typedef signed char s8;
16 typedef unsigned char u8;
19 typedef unsigned short u16;
21 typedef int s32;
22 typedef unsigned int u32;
24 typedef long long s64;
25 typedef unsigned long long u64;
14 typedef long __kernel_long_t;
15 typedef unsigned long __kernel_ulong_t;
27 typedef int __kernel_pid_t;
48 typedef unsigned int __kernel_uid32_t;
49 typedef unsigned int __kernel_gid32_t;
71 typedef __kernel_ulong_t __kernel_size_t;
72 typedef __kernel_long_t __kernel_ssize_t;
87 typedef long long __kernel_loff_t;
88 typedef __kernel_long_t __kernel_time_t;
89 typedef __kernel_long_t __kernel_clock_t;
90 typedef int __kernel_timer_t;
91 typedef int __kernel_clockid_t;
32 typedef __u16 __le16;
34 typedef __u32 __le32;
36 typedef __u64 __le64;
12 typedef __u32 __kernel_dev_t;
15 typedef __kernel_dev_t dev_t;
18 typedef unsigned short umode_t;
21 typedef __kernel_pid_t pid_t;
26 typedef __kernel_clockid_t clockid_t;
29 typedef _Bool bool;
31 typedef __kernel_uid32_t uid_t;
32 typedef __kernel_gid32_t gid_t;
45 typedef __kernel_loff_t loff_t;
54 typedef __kernel_size_t size_t;
59 typedef __kernel_ssize_t ssize_t;
69 typedef __kernel_time_t time_t;
102 typedef __s32 int32_t;
106 typedef __u8 uint8_t;
107 typedef __u16 uint16_t;
108 typedef __u32 uint32_t;
133 typedef unsigned long sector_t;
134 typedef unsigned long blkcnt_t;
146 typedef u64 dma_addr_t;
157 typedef unsigned int gfp_t;
158 typedef unsigned int fmode_t;
159 typedef unsigned int oom_flags_t;
162 typedef u64 phys_addr_t;
167 typedef phys_addr_t resource_size_t;
177 struct __anonstruct_atomic_t_6 { int counter; } ;
177 typedef struct __anonstruct_atomic_t_6 atomic_t;
182 struct __anonstruct_atomic64_t_7 { long counter; } ;
182 typedef struct __anonstruct_atomic64_t_7 atomic64_t;
183 struct list_head { struct list_head *next; struct list_head *prev; } ;
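The list_head just declared is the kernel's intrusive circular doubly-linked list: the links are embedded in the containing object, which is recovered by offset arithmetic. A minimal user-space sketch of that pattern, with container_of and the helpers re-implemented here for illustration rather than taken from kernel headers:

#include <stddef.h>
#include <stdio.h>

/* Offset-arithmetic recovery of the container, as in the kernel macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next; struct list_head *prev; };
struct item { int value; struct list_head link; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
}

int main(void)
{
        struct list_head head, *p;
        struct item a = { 1, { 0, 0 } }, b = { 2, { 0, 0 } };

        list_init(&head);
        list_add_tail(&a.link, &head);
        list_add_tail(&b.link, &head);
        for (p = head.next; p != &head; p = p->next)   /* walk the ring */
                printf("%d\n", container_of(p, struct item, link)->value);
        return 0;
}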
188 struct hlist_node ;
188 struct hlist_head { struct hlist_node *first; } ;
192 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ;
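The pprev field above points at the previous node's next pointer (or at the head's first), so a node can unlink itself without knowing whether its predecessor is another node or the hlist_head. A sketch of that deletion; the helper name is illustrative:

static void hlist_del_sketch(struct hlist_node *n)
{
        *n->pprev = n->next;               /* predecessor (node or head) now skips n */
        if (n->next)
                n->next->pprev = n->pprev; /* successor's back-pointer follows */
}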
203 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ;
213 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ;
5 struct device ;
5 struct page ;
7 struct dma_attrs ;
33 struct module ;
65 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ;
59 struct __anonstruct____missing_field_name_9 { unsigned int a; unsigned int b; } ;
59 struct __anonstruct____missing_field_name_10 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ;
59 union __anonunion____missing_field_name_8 { struct __anonstruct____missing_field_name_9 __annonCompField4; struct __anonstruct____missing_field_name_10 __annonCompField5; } ;
59 struct desc_struct { union __anonunion____missing_field_name_8 __annonCompField6; } ;
15 typedef unsigned long pgdval_t;
16 typedef unsigned long pgprotval_t;
20 struct pgprot { pgprotval_t pgprot; } ;
243 typedef struct pgprot pgprot_t;
245 struct __anonstruct_pgd_t_12 { pgdval_t pgd; } ;
245 typedef struct __anonstruct_pgd_t_12 pgd_t;
333 typedef struct page *pgtable_t;
341 struct file ;
354 struct seq_file ;
389 struct thread_struct ;
391 struct mm_struct ;
392 struct task_struct ;
393 struct cpumask ;
327 struct arch_spinlock ;
18 typedef u16 __ticket_t;
19 typedef u32 __ticketpair_t;
20 struct __raw_tickets { __ticket_t head; __ticket_t tail; } ;
32 union __anonunion____missing_field_name_15 { __ticketpair_t head_tail; struct __raw_tickets tickets; } ;
32 struct arch_spinlock { union __anonunion____missing_field_name_15 __annonCompField7; } ;
33 typedef struct arch_spinlock arch_spinlock_t;
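The head/tail pair in __raw_tickets above implements a FIFO ticket lock: each acquirer takes the current tail as its ticket and spins until head reaches it. A single-threaded sketch of the bookkeeping only; the real lock performs the increment with an atomic xadd and adds memory barriers, both omitted here:

struct ticket_lock_sketch { unsigned short head; unsigned short tail; };

static void ticket_lock(struct ticket_lock_sketch *l)
{
        unsigned short me = l->tail++;     /* atomic fetch-and-add in reality */
        while (l->head != me)
                ;                          /* spin until our ticket is served */
}

static void ticket_unlock(struct ticket_lock_sketch *l)
{
        l->head++;                         /* hand the lock to the next ticket */
}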
33 struct __anonstruct____missing_field_name_17 { u32 read; s32 write; } ;
33 union __anonunion_arch_rwlock_t_16 { s64 lock; struct __anonstruct____missing_field_name_17 __annonCompField8; } ;
33 typedef union __anonunion_arch_rwlock_t_16 arch_rwlock_t;
376 struct file_operations ;
388 struct completion ;
416 struct pid ;
102 struct timespec ;
127 struct kernel_vm86_regs { struct pt_regs pt; unsigned short es; unsigned short __esh; unsigned short ds; unsigned short __dsh; unsigned short fs; unsigned short __fsh; unsigned short gs; unsigned short __gsh; } ;
79 union __anonunion____missing_field_name_22 { struct pt_regs *regs; struct kernel_vm86_regs *vm86; } ;
79 struct math_emu_info { long ___orig_eip; union __anonunion____missing_field_name_22 __annonCompField10; } ;
306 struct cpumask { unsigned long bits[128U]; } ;
14 typedef struct cpumask cpumask_t;
663 typedef struct cpumask *cpumask_var_t;
162 struct seq_operations ;
294 struct i387_fsave_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ;
312 struct __anonstruct____missing_field_name_27 { u64 rip; u64 rdp; } ;
312 struct __anonstruct____missing_field_name_28 { u32 fip; u32 fcs; u32 foo; u32 fos; } ;
312 union __anonunion____missing_field_name_26 { struct __anonstruct____missing_field_name_27 __annonCompField14; struct __anonstruct____missing_field_name_28 __annonCompField15; } ;
312 union __anonunion____missing_field_name_29 { u32 padding1[12U]; u32 sw_reserved[12U]; } ;
312 struct i387_fxsave_struct { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_26 __annonCompField16; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_29 __annonCompField17; } ;
346 struct i387_soft_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ;
367 struct ymmh_struct { u32 ymmh_space[64U]; } ;
372 struct lwp_struct { u8 reserved[128U]; } ;
377 struct bndregs_struct { u64 bndregs[8U]; } ;
381 struct bndcsr_struct { u64 cfg_reg_u; u64 status_reg; } ;
386 struct xsave_hdr_struct { u64 xstate_bv; u64 reserved1[2U]; u64 reserved2[5U]; } ;
392 struct xsave_struct { struct i387_fxsave_struct i387; struct xsave_hdr_struct xsave_hdr; struct ymmh_struct ymmh; struct lwp_struct lwp; struct bndregs_struct bndregs; struct bndcsr_struct bndcsr; } ;
401 union thread_xstate { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; struct i387_soft_struct soft; struct xsave_struct xsave; } ;
409 struct fpu { unsigned int last_cpu; unsigned int has_fpu; union thread_xstate *state; } ;
456 struct kmem_cache ;
457 struct perf_event ;
458 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned long usersp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fs; unsigned long gs; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; struct fpu fpu; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; unsigned char fpu_counter; } ;
23 typedef atomic64_t atomic_long_t;
152 struct lockdep_map ;
55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ;
26 struct lockdep_subclass_key { char __one_byte; } ;
53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ;
59 struct lock_class { struct list_head hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ;
144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ;
205 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; } ;
537 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
32 typedef struct raw_spinlock raw_spinlock_t;
33 struct __anonstruct____missing_field_name_33 { u8 __padding[24U]; struct lockdep_map dep_map; } ;
33 union __anonunion____missing_field_name_32 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_33 __annonCompField19; } ;
33 struct spinlock { union __anonunion____missing_field_name_32 __annonCompField20; } ;
76 typedef struct spinlock spinlock_t;
23 struct __anonstruct_rwlock_t_34 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
23 typedef struct __anonstruct_rwlock_t_34 rwlock_t;
12 struct __wait_queue ;
12 typedef struct __wait_queue wait_queue_t;
15 struct __wait_queue { unsigned int flags; void *private; int (*func)(wait_queue_t *, unsigned int, int, void *); struct list_head task_list; } ;
34 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ;
39 typedef struct __wait_queue_head wait_queue_head_t;
919 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ;
51 typedef struct seqcount seqcount_t;
98 struct __anonstruct_nodemask_t_36 { unsigned long bits[16U]; } ;
98 typedef struct __anonstruct_nodemask_t_36 nodemask_t;
799 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; const char *name; void *magic; struct lockdep_map dep_map; } ;
67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ;
177 struct rw_semaphore ;
178 struct rw_semaphore { long count; raw_spinlock_t wait_lock; struct list_head wait_list; struct lockdep_map dep_map; } ;
155 struct completion { unsigned int done; wait_queue_head_t wait; } ;
1039 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ;
323 union ktime { s64 tv64; } ;
59 typedef union ktime ktime_t;
388 struct tvec_base ;
389 struct timer_list { struct list_head entry; unsigned long expires; struct tvec_base *base; void (*function)(unsigned long); unsigned long data; int slack; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ;
254 struct hrtimer ;
255 enum hrtimer_restart ;
266 struct workqueue_struct ;
267 struct work_struct ;
54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ;
107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ;
72 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; struct resource *parent; struct resource *sibling; struct resource *child; } ;
58 struct pm_message { int event; } ;
64 typedef struct pm_message pm_message_t;
65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ;
301 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ;
308 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ;
316 struct wakeup_source ;
527 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; } ;
534 struct dev_pm_qos ;
534 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool ignore_children; bool early_init; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; struct dev_pm_qos *qos; } ;
591 struct dev_pm_domain { struct dev_pm_ops ops; } ;
22 struct __anonstruct_mm_context_t_101 { void *ldt; int size; unsigned short ia32_compat; struct mutex lock; void *vdso; } ;
22 typedef struct __anonstruct_mm_context_t_101 mm_context_t;
18 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ;
40 struct rb_root { struct rb_node *rb_node; } ;
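In rb_node above, __rb_parent_color packs the parent pointer and the node's color into a single word: rb_node is word-aligned, so the low bits of any parent address are zero and one of them can carry the color. A sketch of the unpacking, with illustrative helper names:

static struct rb_node *rb_parent_sketch(const struct rb_node *n)
{
        return (struct rb_node *)(n->__rb_parent_color & ~3UL);
}

static unsigned long rb_color_sketch(const struct rb_node *n)
{
        return n->__rb_parent_color & 1UL; /* 0 = red, 1 = black */
}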
87 struct vm_area_struct ;
341 struct device_node ;
1276 struct llist_node ;
64 struct llist_node { struct llist_node *next; } ;
419 struct idr_layer { int prefix; unsigned long bitmap[4U]; struct idr_layer *ary[256U]; int count; int layer; struct callback_head callback_head; } ;
38 struct idr { struct idr_layer *hint; struct idr_layer *top; struct idr_layer *id_free; int layers; int id_free_cnt; int cur; spinlock_t lock; } ;
197 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ;
213 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ;
245 struct dentry ;
246 struct iattr ;
247 struct super_block ;
248 struct file_system_type ;
249 struct kernfs_open_node ;
250 struct kernfs_iattrs ;
266 struct kernfs_root ;
266 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ;
62 struct kernfs_node ;
62 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ;
66 struct kernfs_ops ;
66 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; } ;
72 union __anonunion_u_129 { struct completion *completion; struct kernfs_node *removed_list; } ;
72 union __anonunion____missing_field_name_130 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ;
72 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; union __anonunion_u_129 u; const void *ns; unsigned int hash; union __anonunion____missing_field_name_130 __annonCompField32; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ;
114 struct kernfs_dir_ops { int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ;
127 struct kernfs_root { struct kernfs_node *kn; struct ida ino_ida; struct kernfs_dir_ops *dir_ops; } ;
137 struct vm_operations_struct ;
137 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; struct mutex mutex; int event; struct list_head list; bool mmapped; const struct vm_operations_struct *vm_ops; } ;
151 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ;
376 struct sock ;
377 struct kobject ;
378 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ;
384 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ;
83 struct user_namespace ;
22 struct __anonstruct_kuid_t_131 { uid_t val; } ;
22 typedef struct __anonstruct_kuid_t_131 kuid_t;
27 struct __anonstruct_kgid_t_132 { gid_t val; } ;
27 typedef struct __anonstruct_kgid_t_132 kgid_t;
127 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ;
36 struct bin_attribute ;
37 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ;
37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ;
67 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;
130 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;
462 struct kref { atomic_t refcount; } ;
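kref above is the kernel's reference-count primitive: a wrapped atomic_t whose final decrement triggers a release callback. A user-space sketch of that pattern, reusing the container_of sketch from earlier, with plain integer arithmetic standing in for the atomic operations and illustrative names throughout:

#include <stdlib.h>

struct object_sketch { struct kref ref; int payload; };

static void object_release(struct kref *k)
{
        struct object_sketch *o = container_of(k, struct object_sketch, ref);
        free(o);                              /* last reference is gone */
}

static void object_put(struct object_sketch *o)
{
        if (--o->ref.refcount.counter == 0)   /* atomic_dec_and_test() in reality */
                object_release(&o->ref);
}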
50 struct kset ;
50 struct kobj_type ;
50 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ;
112 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ;
120 struct kobj_uevent_env { char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ;
127 struct kset_uevent_ops { int (* const filter)(struct kset *, struct kobject *); const char * (* const name)(struct kset *, struct kobject *); int (* const uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;
144 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ;
219 struct klist_node ;
37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ;
67 struct path ;
68 struct inode ;
69 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; struct user_namespace *user_ns; void *private; } ;
35 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
196 struct pinctrl ;
197 struct pinctrl_state ;
194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
42 struct dma_map_ops ;
42 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ;
14 struct device_private ;
15 struct device_driver ;
16 struct driver_private ;
17 struct class ;
18 struct subsys_private ;
19 struct bus_type ;
20 struct iommu_ops ;
21 struct iommu_group ;
60 struct device_attribute ;
60 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
138 struct device_type ;
195 struct of_device_id ;
195 struct acpi_device_id ;
195 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
321 struct class_attribute ;
321 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
414 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
482 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
510 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
637 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
646 struct acpi_device ;
647 struct acpi_dev_node { struct acpi_device *companion; } ;
653 struct dma_coherent_mem ;
653 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct dev_pin_info *pins; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct dev_archdata archdata; struct device_node *of_node; struct acpi_dev_node acpi_node; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ;
795 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
1196 struct dma_attrs { unsigned long flags[1U]; } ;
45 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ;
54 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ;
61 struct __anonstruct____missing_field_name_135 { struct arch_uprobe_task autask; unsigned long vaddr; } ;
61 struct __anonstruct____missing_field_name_136 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ;
61 union __anonunion____missing_field_name_134 { struct __anonstruct____missing_field_name_135 __annonCompField34; struct __anonstruct____missing_field_name_136 __annonCompField35; } ;
61 struct uprobe ;
61 struct return_instance ;
61 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_134 __annonCompField36; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ;
93 struct xol_area ;
94 struct uprobes_state { struct xol_area *xol_area; } ;
129 struct address_space ;
130 union __anonunion____missing_field_name_137 { struct address_space *mapping; void *s_mem; } ;
130 union __anonunion____missing_field_name_139 { unsigned long index; void *freelist; bool pfmemalloc; } ;
130 struct __anonstruct____missing_field_name_143 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ;
130 union __anonunion____missing_field_name_142 { atomic_t _mapcount; struct __anonstruct____missing_field_name_143 __annonCompField39; int units; } ;
130 struct __anonstruct____missing_field_name_141 { union __anonunion____missing_field_name_142 __annonCompField40; atomic_t _count; } ;
130 union __anonunion____missing_field_name_140 { unsigned long counters; struct __anonstruct____missing_field_name_141 __annonCompField41; unsigned int active; } ;
130 struct __anonstruct____missing_field_name_138 { union __anonunion____missing_field_name_139 __annonCompField38; union __anonunion____missing_field_name_140 __annonCompField42; } ;
130 struct __anonstruct____missing_field_name_145 { struct page *next; int pages; int pobjects; } ;
130 struct slab ;
130 union __anonunion____missing_field_name_144 { struct list_head lru; struct __anonstruct____missing_field_name_145 __annonCompField44; struct list_head list; struct slab *slab_page; struct callback_head callback_head; pgtable_t pmd_huge_pte; } ;
130 union __anonunion____missing_field_name_146 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; struct page *first_page; } ;
130 struct page { unsigned long flags; union __anonunion____missing_field_name_137 __annonCompField37; struct __anonstruct____missing_field_name_138 __annonCompField43; union __anonunion____missing_field_name_144 __annonCompField45; union __anonunion____missing_field_name_146 __annonCompField46; unsigned long debug_flags; } ;
186 struct page_frag { struct page *page; __u32 offset; __u32 size; } ;
238 struct __anonstruct_linear_148 { struct rb_node rb; unsigned long rb_subtree_last; } ;
238 union __anonunion_shared_147 { struct __anonstruct_linear_148 linear; struct list_head nonlinear; } ;
238 struct anon_vma ;
238 struct mempolicy ;
238 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; union __anonunion_shared_147 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; } ;
310 struct core_thread { struct task_struct *task; struct core_thread *next; } ;
316 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ;
329 struct task_rss_stat { int events; int count[3U]; } ;
337 struct mm_rss_stat { atomic_long_t count[3U]; } ;
342 struct kioctx_table ;
343 struct linux_binfmt ;
343 struct mmu_notifier_mm ;
343 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; struct vm_area_struct *mmap_cache; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long shared_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; } ;
93 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; nodemask_t nodes_to_scan; int nid; } ;
26 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ;
71 struct file_ra_state ;
72 struct user_struct ;
73 struct writeback_control ;
185 struct vm_fault { unsigned int flags; unsigned long pgoff; void *virtual_address; struct page *page; } ;
210 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); int (*migrate)(struct vm_area_struct *, const nodemask_t *, const nodemask_t *, unsigned long); int (*remap_pages)(struct vm_area_struct *, unsigned long, unsigned long, unsigned long); } ;
2071 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ;
17 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ;
351 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , struct dma_attrs *); void (*free)(struct device *, size_t , void *, dma_addr_t , struct dma_attrs *); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , struct dma_attrs *); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , struct dma_attrs *); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , struct dma_attrs *); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs *); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ;
13 typedef unsigned long kernel_ulong_t;
186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; } ;
219 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ;
69 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ;
16 typedef enum irqreturn irqreturn_t;
17 struct dma_pool ;
58 struct mem_cgroup ;
355 struct kmem_cache_cpu { void **freelist; unsigned long tid; struct page *page; struct page *partial; unsigned int stat[26U]; } ;
48 struct kmem_cache_order_objects { unsigned long x; } ;
58 struct memcg_cache_params ;
58 struct kmem_cache_node ;
58 struct kmem_cache { struct kmem_cache_cpu *cpu_slab; unsigned long flags; unsigned long min_partial; int size; int object_size; int offset; int cpu_partial; struct kmem_cache_order_objects oo; struct kmem_cache_order_objects max; struct kmem_cache_order_objects min; gfp_t allocflags; int refcount; void (*ctor)(void *); int inuse; int align; int reserved; const char *name; struct list_head list; struct kobject kobj; struct memcg_cache_params *memcg_params; int max_attr_size; int remote_node_defrag_ratio; struct kmem_cache_node *node[1024U]; } ;
497 struct __anonstruct____missing_field_name_154 { struct callback_head callback_head; struct kmem_cache *memcg_caches[0U]; } ;
497 struct __anonstruct____missing_field_name_155 { struct mem_cgroup *memcg; struct list_head list; struct kmem_cache *root_cache; bool dead; atomic_t nr_pages; struct work_struct destroy; } ;
497 union __anonunion____missing_field_name_153 { struct __anonstruct____missing_field_name_154 __annonCompField48; struct __anonstruct____missing_field_name_155 __annonCompField49; } ;
497 struct memcg_cache_params { bool is_root_cache; union __anonunion____missing_field_name_153 __annonCompField50; } ;
1809 struct umc_dev { u16 version; u8 cap_id; u8 bar; struct resource resource; unsigned int irq; struct device dev; } ;
253 struct usb_device_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 bcdUSB; __u8 bDeviceClass; __u8 bDeviceSubClass; __u8 bDeviceProtocol; __u8 bMaxPacketSize0; __le16 idVendor; __le16 idProduct; __le16 bcdDevice; __u8 iManufacturer; __u8 iProduct; __u8 iSerialNumber; __u8 bNumConfigurations; } ;
275 struct usb_config_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wTotalLength; __u8 bNumInterfaces; __u8 bConfigurationValue; __u8 iConfiguration; __u8 bmAttributes; __u8 bMaxPower; } ;
343 struct usb_interface_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bInterfaceNumber; __u8 bAlternateSetting; __u8 bNumEndpoints; __u8 bInterfaceClass; __u8 bInterfaceSubClass; __u8 bInterfaceProtocol; __u8 iInterface; } ;
363 struct usb_endpoint_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bEndpointAddress; __u8 bmAttributes; __le16 wMaxPacketSize; __u8 bInterval; __u8 bRefresh; __u8 bSynchAddress; } ;
613 struct usb_ss_ep_comp_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bMaxBurst; __u8 bmAttributes; __le16 wBytesPerInterval; } ;
692 struct usb_interface_assoc_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bFirstInterface; __u8 bInterfaceCount; __u8 bFunctionClass; __u8 bFunctionSubClass; __u8 bFunctionProtocol; __u8 iFunction; } ;
721 struct usb_key_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 tTKID[3U]; __u8 bReserved; __u8 bKeyData[0U]; } ;
735 struct usb_encryption_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bEncryptionType; __u8 bEncryptionValue; __u8 bAuthKeyIndex; } ;
751 struct usb_bos_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 wTotalLength; __u8 bNumDeviceCaps; } ;
773 struct usb_wireless_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bmAttributes; __le16 wPHYRates; __u8 bmTFITXPowerInfo; __u8 bmFFITXPowerInfo; __le16 bmBandGroup; __u8 bReserved; } ;
801 struct usb_ext_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __le32 bmAttributes; } ;
811 struct usb_ss_cap_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bmAttributes; __le16 wSpeedSupported; __u8 bFunctionalitySupport; __u8 bU1devExitLat; __le16 bU2DevExitLat; } ;
840 struct usb_ss_container_id_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDevCapabilityType; __u8 bReserved; __u8 ContainerID[16U]; } ;
905 enum usb_device_speed { USB_SPEED_UNKNOWN = 0, USB_SPEED_LOW = 1, USB_SPEED_FULL = 2, USB_SPEED_HIGH = 3, USB_SPEED_WIRELESS = 4, USB_SPEED_SUPER = 5 } ;
914 enum usb_device_state { USB_STATE_NOTATTACHED = 0, USB_STATE_ATTACHED = 1, USB_STATE_POWERED = 2, USB_STATE_RECONNECTING = 3, USB_STATE_UNAUTHENTICATED = 4, USB_STATE_DEFAULT = 5, USB_STATE_ADDRESS = 6, USB_STATE_CONFIGURED = 7, USB_STATE_SUSPENDED = 8 } ;
926 enum usb3_link_state { USB3_LPM_U0 = 0, USB3_LPM_U1 = 1, USB3_LPM_U2 = 2, USB3_LPM_U3 = 3 } ;
60 struct timerqueue_node { struct rb_node node; ktime_t expires; } ;
12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ;
50 struct hrtimer_clock_base ;
51 struct hrtimer_cpu_base ;
60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ;
65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; int start_pid; void *start_site; char start_comm[16U]; } ;
132 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t resolution; ktime_t (*get_time)(); ktime_t softirq_time; ktime_t offset; } ;
163 struct hrtimer_cpu_base { raw_spinlock_t lock; unsigned int active_bases; unsigned int clock_was_set; ktime_t expires_next; int hres_active; int hang_detected; unsigned long nr_events; unsigned long nr_retries; unsigned long nr_hangs; ktime_t max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ;
405 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ;
460 struct hlist_bl_node ;
460 struct hlist_bl_head { struct hlist_bl_node *first; } ;
36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ;
114 struct __anonstruct____missing_field_name_159 { spinlock_t lock; unsigned int count; } ;
114 union __anonunion____missing_field_name_158 { struct __anonstruct____missing_field_name_159 __annonCompField52; } ;
114 struct lockref { union __anonunion____missing_field_name_158 __annonCompField53; } ;
49 struct nameidata ;
50 struct vfsmount ;
51 struct __anonstruct____missing_field_name_161 { u32 hash; u32 len; } ;
51 union __anonunion____missing_field_name_160 { struct __anonstruct____missing_field_name_161 __annonCompField54; u64 hash_len; } ;
51 struct qstr { union __anonunion____missing_field_name_160 __annonCompField55; const unsigned char *name; } ;
90 struct dentry_operations ;
90 union __anonunion_d_u_162 { struct list_head d_child; struct callback_head d_rcu; } ;
90 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; struct list_head d_lru; union __anonunion_d_u_162 d_u; struct list_head d_subdirs; struct hlist_node d_alias; } ;
142 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); } ;
469 struct path { struct vfsmount *mnt; struct dentry *dentry; } ;
26 struct list_lru_node { spinlock_t lock; struct list_head list; long nr_items; } ;
28 struct list_lru { struct list_lru_node *node; nodemask_t active_nodes; } ;
58 struct radix_tree_node ;
58 struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; } ;
381 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ;
388 struct pid_namespace ;
388 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ;
56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ;
68 struct pid_link { struct hlist_node node; struct pid *pid; } ;
22 struct kernel_cap_struct { __u32 cap[2U]; } ;
25 typedef struct kernel_cap_struct kernel_cap_t;
45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ;
38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ;
30 struct block_device ;
31 struct io_context ;
56 struct export_operations ;
58 struct iovec ;
59 struct kiocb ;
60 struct pipe_inode_info ;
61 struct poll_table_struct ;
62 struct kstatfs ;
63 struct cred ;
64 struct swap_info_struct ;
68 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ;
246 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ;
176 struct fs_disk_quota { __s8 d_version; __s8 d_flags; __u16 d_fieldmask; __u32 d_id; __u64 d_blk_hardlimit; __u64 d_blk_softlimit; __u64 d_ino_hardlimit; __u64 d_ino_softlimit; __u64 d_bcount; __u64 d_icount; __s32 d_itimer; __s32 d_btimer; __u16 d_iwarns; __u16 d_bwarns; __s32 d_padding2; __u64 d_rtb_hardlimit; __u64 d_rtb_softlimit; __u64 d_rtbcount; __s32 d_rtbtimer; __u16 d_rtbwarns; __s16 d_padding3; char d_padding4[8U]; } ;
76 struct fs_qfilestat { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; } ;
151 typedef struct fs_qfilestat fs_qfilestat_t;
152 struct fs_quota_stat { __s8 qs_version; __u16 qs_flags; __s8 qs_pad; fs_qfilestat_t qs_uquota; fs_qfilestat_t qs_gquota; __u32 qs_incoredqs; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; } ;
166 struct fs_qfilestatv { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; __u32 qfs_pad; } ;
196 struct fs_quota_statv { __s8 qs_version; __u8 qs_pad1; __u16 qs_flags; __u32 qs_incoredqs; struct fs_qfilestatv qs_uquota; struct fs_qfilestatv qs_gquota; struct fs_qfilestatv qs_pquota; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; __u64 qs_pad2[8U]; } ;
212 struct dquot ;
19 typedef __kernel_uid32_t projid_t;
23 struct __anonstruct_kprojid_t_164 { projid_t val; } ;
23 typedef struct __anonstruct_kprojid_t_164 kprojid_t;
119 struct if_dqinfo { __u64 dqi_bgrace; __u64 dqi_igrace; __u32 dqi_flags; __u32 dqi_valid; } ;
152 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ;
60 typedef long long qsize_t;
61 union __anonunion____missing_field_name_165 { kuid_t uid; kgid_t gid; kprojid_t projid; } ;
61 struct kqid { union __anonunion____missing_field_name_165 __annonCompField56; enum quota_type type; } ;
178 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time_t dqb_btime; time_t dqb_itime; } ;
200 struct quota_format_type ;
201 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_maxblimit; qsize_t dqi_maxilimit; void *dqi_priv; } ;
264 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ;
291 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); } ;
302 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); } ;
316 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_on_meta)(struct super_block *, int, int); int (*quota_off)(struct super_block *, int); int (*quota_sync)(struct super_block *, int); int (*get_info)(struct super_block *, int, struct if_dqinfo *); int (*set_info)(struct super_block *, int, struct if_dqinfo *); int (*get_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*set_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*get_xstate)(struct super_block *, struct fs_quota_stat *); int (*set_xstate)(struct super_block *, unsigned int, int); int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); } ;
333 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ;
379 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct rw_semaphore dqptr_sem; struct inode *files[2U]; struct mem_dqinfo info[2U]; const struct quota_format_ops *ops[2U]; } ;
345 union __anonunion_arg_167 { char *buf; void *data; } ;
345 struct __anonstruct_read_descriptor_t_166 { size_t written; size_t count; union __anonunion_arg_167 arg; int error; } ;
345 typedef struct __anonstruct_read_descriptor_t_166 read_descriptor_t;
348 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *, loff_t , unsigned long); int (*get_xip_mem)(struct address_space *, unsigned long, int, void **, unsigned long *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ;
408 struct backing_dev_info ;
409 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; unsigned int i_mmap_writable; struct rb_root i_mmap; struct list_head i_mmap_nonlinear; struct mutex i_mmap_mutex; unsigned long nrpages; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; struct backing_dev_info *backing_dev_info; spinlock_t private_lock; struct list_head private_list; void *private_data; } ;
430 struct request_queue ;
431 struct hd_struct ;
431 struct gendisk ;
431 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; struct list_head bd_inodes; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ;
503 struct posix_acl ;
504 struct inode_operations ;
504 union __anonunion____missing_field_name_168 { const unsigned int i_nlink; unsigned int __i_nlink; } ;
504 union __anonunion____missing_field_name_169 { struct hlist_head i_dentry; struct callback_head i_rcu; } ;
504 struct file_lock ;
504 struct cdev ;
504 union __anonunion____missing_field_name_170 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; } ;
504 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_168 __annonCompField57; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct mutex i_mutex; unsigned long dirtied_when; struct hlist_node i_hash; struct list_head i_wb_list; struct list_head i_lru; struct list_head i_sb_list; union __anonunion____missing_field_name_169 __annonCompField58; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; const struct file_operations *i_fop; struct file_lock *i_flock; struct address_space i_data; struct dquot *i_dquot[2U]; struct list_head i_devices; union __anonunion____missing_field_name_170 __annonCompField59; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; atomic_t i_readcount; void *i_private; } ;
740 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ;
748 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ;
771 union __anonunion_f_u_171 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ;
771 struct file { union __anonunion_f_u_171 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; unsigned long f_mnt_write_state; } ;
909 struct files_struct ;
909 typedef struct files_struct *fl_owner_t;
910 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ;
915 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, struct file_lock *, int); void (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock **, int); } ;
933 struct nlm_lockowner ;
934 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ;
14 struct nfs4_lock_state ;
15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ;
19 struct fasync_struct ;
19 struct __anonstruct_afs_173 { struct list_head link; int state; } ;
19 union __anonunion_fl_u_172 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_173 afs; } ;
19 struct file_lock { struct file_lock *fl_next; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_172 fl_u; } ;
1036 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ;
1228 struct sb_writers { struct percpu_counter counter[3U]; wait_queue_head_t wait; int frozen; wait_queue_head_t wait_unfrozen; struct lockdep_map lock_map[3U]; } ;
1244 struct super_operations ;
1244 struct xattr_handler ;
1244 struct mtd_info ;
1244 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; struct list_head s_inodes; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; } ;
1474 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ;
1512 struct dir_context { int (*actor)(void *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ;
1517 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*aio_read)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); ssize_t (*aio_write)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); int (*iterate)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); int (*show_fdinfo)(struct seq_file *, struct file *); } ;
1555 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); void * (*follow_link)(struct dentry *, struct nameidata *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); void (*put_link)(struct dentry *, struct nameidata *, void *); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ;
1600 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_fs)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, int); long int (*free_cached_objects)(struct super_block *, long, int); } ;
1814 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
4 typedef unsigned long cputime_t;
25 struct sem_undo_list ;
25 struct sysv_sem { struct sem_undo_list *undo_list; } ;
24 struct __anonstruct_sigset_t_174 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_174 sigset_t;
25 struct siginfo ;
17 typedef void __signalfn_t(int);
18 typedef __signalfn_t *__sighandler_t;
20 typedef void __restorefn_t();
21 typedef __restorefn_t *__sigrestore_t;
34 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_176 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_177 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_178 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_179 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__sigfault_180 { void *_addr; short _addr_lsb; } ;
11 struct __anonstruct__sigpoll_181 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_182 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_175 { int _pad[28U]; struct __anonstruct__kill_176 _kill; struct __anonstruct__timer_177 _timer; struct __anonstruct__rt_178 _rt; struct __anonstruct__sigchld_179 _sigchld; struct __anonstruct__sigfault_180 _sigfault; struct __anonstruct__sigpoll_181 _sigpoll; struct __anonstruct__sigsys_182 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_175 _sifields; } ;
109 typedef struct siginfo siginfo_t;
21 struct sigpending { struct list_head list; sigset_t signal; } ;
251 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ;
265 struct k_sigaction { struct sigaction sa; } ;
46 struct seccomp_filter ;
47 struct seccomp { int mode; struct seccomp_filter *filter; } ;
40 struct rt_mutex_waiter ;
41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ;
11 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
835 struct nsproxy ;
193 struct assoc_array_ptr ;
193 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct signal_struct ;
37 struct key_type ;
41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
123 union __anonunion____missing_field_name_187 { struct list_head graveyard_link; struct rb_node serial_node; } ;
123 struct key_user ;
123 union __anonunion____missing_field_name_188 { time_t expiry; time_t revoked_at; } ;
123 struct __anonstruct____missing_field_name_190 { struct key_type *type; char *description; } ;
123 union __anonunion____missing_field_name_189 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_190 __annonCompField64; } ;
123 union __anonunion_type_data_191 { struct list_head link; unsigned long x[2U]; void *p[2U]; int reject_error; } ;
123 union __anonunion_payload_193 { unsigned long value; void *rcudata; void *data; void *data2[2U]; } ;
123 union __anonunion____missing_field_name_192 { union __anonunion_payload_193 payload; struct assoc_array keys; } ;
123 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_187 __annonCompField62; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_188 __annonCompField63; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_189 __annonCompField65; union __anonunion_type_data_191 type_data; union __anonunion____missing_field_name_192 __annonCompField66; } ;
345 struct audit_context ;
27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ;
78 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
123 struct futex_pi_state ;
124 struct robust_list_head ;
125 struct bio_list ;
126 struct fs_struct ;
127 struct perf_event_context ;
128 struct blk_plug ;
180 struct cfs_rq ;
181 struct task_group ;
421 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ;
460 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ;
468 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ;
475 struct cputime { cputime_t utime; cputime_t stime; } ;
487 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ;
507 struct thread_group_cputimer { struct task_cputime cputime; int running; raw_spinlock_t lock; } ;
549 struct autogroup ;
550 struct tty_struct ;
550 struct taskstats ;
550 struct tty_audit_buf ;
550 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; unsigned int audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; struct rw_semaphore group_rwsem; oom_flags_t oom_flags; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ;
730 struct user_struct { atomic_t __count; atomic_t processes; atomic_t files; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
774 struct reclaim_state ;
775 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
790 struct task_delay_info { spinlock_t lock; unsigned int flags; struct timespec blkio_start; struct timespec blkio_end; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; struct timespec freepages_start; struct timespec freepages_end; u64 freepages_delay; u32 freepages_count; } ;
1005 struct load_weight { unsigned long weight; u32 inv_weight; } ;
1013 struct sched_avg { u32 runnable_avg_sum; u32 runnable_avg_period; u64 last_runnable_update; s64 decay_count; unsigned long load_avg_contrib; } ;
1025 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
1060 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
1091 struct rt_rq ;
1091 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
1107 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_new; int dl_boosted; struct hrtimer dl_timer; } ;
1162 struct memcg_batch_info { int do_batch; struct mem_cgroup *memcg; unsigned long nr_pages; unsigned long memsw_nr_pages; } ;
1569 struct memcg_oom_info { struct mem_cgroup *memcg; gfp_t gfp_mask; int order; unsigned char may_oom; } ;
1576 struct sched_class ;
1576 struct css_set ;
1576 struct compat_robust_list_head ;
1576 struct numa_group ;
1576 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; struct task_struct *last_wakee; unsigned long wakee_flips; unsigned long wakee_flip_decay_ts; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; unsigned char brk_randomized; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned int jobctl; unsigned int personality; unsigned char in_execve; unsigned char in_iowait; unsigned char no_new_privs; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; struct timespec start_time; struct timespec real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; int link_count; int total_link_count; struct sysv_sem sysvsem; unsigned long last_switch_count; struct thread_struct thread; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; int (*notifier)(void *); void *notifier_data; sigset_t *notifier_mask; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct task_struct *pi_top_task; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; int numa_migrate_deferred; unsigned long numa_migrate_retry; u64 node_stamp; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long *numa_faults_buffer; unsigned long numa_faults_locality[2U]; unsigned long numa_pages_migrated; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; unsigned long trace; unsigned long trace_recursion; struct memcg_batch_info memcg_batch; unsigned int memcg_kmem_skip_account; struct memcg_oom_info memcg_oom; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; } ;
262 struct usb_device ;
264 struct wusb_dev ;
265 struct ep_device ;
266 struct usb_host_endpoint { struct usb_endpoint_descriptor desc; struct usb_ss_ep_comp_descriptor ss_ep_comp; struct list_head urb_list; void *hcpriv; struct ep_device *ep_dev; unsigned char *extra; int extralen; int enabled; } ;
75 struct usb_host_interface { struct usb_interface_descriptor desc; int extralen; unsigned char *extra; struct usb_host_endpoint *endpoint; char *string; } ;
90 enum usb_interface_condition { USB_INTERFACE_UNBOUND = 0, USB_INTERFACE_BINDING = 1, USB_INTERFACE_BOUND = 2, USB_INTERFACE_UNBINDING = 3 } ;
97 struct usb_interface { struct usb_host_interface *altsetting; struct usb_host_interface *cur_altsetting; unsigned int num_altsetting; struct usb_interface_assoc_descriptor *intf_assoc; int minor; enum usb_interface_condition condition; unsigned char sysfs_files_created; unsigned char ep_devs_created; unsigned char unregistering; unsigned char needs_remote_wakeup; unsigned char needs_altsetting0; unsigned char needs_binding; unsigned char reset_running; unsigned char resetting_device; struct device dev; struct device *usb_dev; atomic_t pm_usage_cnt; struct work_struct reset_ws; } ;
204 struct usb_interface_cache { unsigned int num_altsetting; struct kref ref; struct usb_host_interface altsetting[0U]; } ;
231 struct usb_host_config { struct usb_config_descriptor desc; char *string; struct usb_interface_assoc_descriptor *intf_assoc[16U]; struct usb_interface *interface[32U]; struct usb_interface_cache *intf_cache[32U]; unsigned char *extra; int extralen; } ;
295 struct usb_host_bos { struct usb_bos_descriptor *desc; struct usb_ext_cap_descriptor *ext_cap; struct usb_ss_cap_descriptor *ss_cap; struct usb_ss_container_id_descriptor *ss_id; } ;
307 struct usb_devmap { unsigned long devicemap[2U]; } ;
319 struct mon_bus ;
319 struct usb_bus { struct device *controller; int busnum; const char *bus_name; u8 uses_dma; u8 uses_pio_for_control; u8 otg_port; unsigned char is_b_host; unsigned char b_hnp_enable; unsigned char no_stop_on_short; unsigned char no_sg_constraint; unsigned int sg_tablesize; int devnum_next; struct usb_devmap devmap; struct usb_device *root_hub; struct usb_bus *hs_companion; struct list_head bus_list; int bandwidth_allocated; int bandwidth_int_reqs; int bandwidth_isoc_reqs; unsigned int resuming_ports; struct mon_bus *mon_bus; int monitored; } ;
367 struct usb_tt ;
368 enum usb_device_removable { USB_DEVICE_REMOVABLE_UNKNOWN = 0, USB_DEVICE_REMOVABLE = 1, USB_DEVICE_FIXED = 2 } ;
381 struct usb2_lpm_parameters { unsigned int besl; int timeout; } ;
401 struct usb3_lpm_parameters { unsigned int mel; unsigned int pel; unsigned int sel; int timeout; } ;
440 struct usb_device { int devnum; char devpath[16U]; u32 route; enum usb_device_state state; enum usb_device_speed speed; struct usb_tt *tt; int ttport; unsigned int toggle[2U]; struct usb_device *parent; struct usb_bus *bus; struct usb_host_endpoint ep0; struct device dev; struct usb_device_descriptor descriptor; struct usb_host_bos *bos; struct usb_host_config *config; struct usb_host_config *actconfig; struct usb_host_endpoint *ep_in[16U]; struct usb_host_endpoint *ep_out[16U]; char **rawdescriptors; unsigned short bus_mA; u8 portnum; u8 level; unsigned char can_submit; unsigned char persist_enabled; unsigned char have_langid; unsigned char authorized; unsigned char authenticated; unsigned char wusb; unsigned char lpm_capable; unsigned char usb2_hw_lpm_capable; unsigned char usb2_hw_lpm_besl_capable; unsigned char usb2_hw_lpm_enabled; unsigned char usb2_hw_lpm_allowed; unsigned char usb3_lpm_enabled; int string_langid; char *product; char *manufacturer; char *serial; struct list_head filelist; int maxchild; u32 quirks; atomic_t urbnum; unsigned long active_duration; unsigned long connect_time; unsigned char do_remote_wakeup; unsigned char reset_resume; unsigned char port_is_suspended; struct wusb_dev *wusb_dev; int slot_id; enum usb_device_removable removable; struct usb2_lpm_parameters l1_params; struct usb3_lpm_parameters u1_params; struct usb3_lpm_parameters u2_params; unsigned int lpm_disable_count; } ;
1166 struct usb_iso_packet_descriptor { unsigned int offset; unsigned int length; unsigned int actual_length; int status; } ;
1208 struct urb ;
1209 struct usb_anchor { struct list_head urb_list; wait_queue_head_t wait; spinlock_t lock; atomic_t suspend_wakeups; unsigned char poisoned; } ;
1228 struct urb { struct kref kref; void *hcpriv; atomic_t use_count; atomic_t reject; int unlinked; struct list_head urb_list; struct list_head anchor_list; struct usb_anchor *anchor; struct usb_device *dev; struct usb_host_endpoint *ep; unsigned int pipe; unsigned int stream_id; int status; unsigned int transfer_flags; void *transfer_buffer; dma_addr_t transfer_dma; struct scatterlist *sg; int num_mapped_sgs; int num_sgs; u32 transfer_buffer_length; u32 actual_length; unsigned char *setup_packet; dma_addr_t setup_dma; int start_frame; int number_of_packets; int interval; int error_count; void *context; void (*complete)(struct urb *); struct usb_iso_packet_descriptor iso_frame_desc[0U]; } ;
1847 struct giveback_urb_bh { bool running; spinlock_t lock; struct list_head head; struct tasklet_struct bh; struct usb_host_endpoint *completing_ep; } ;
78 struct hc_driver ;
78 struct usb_phy ;
78 struct usb_hcd { struct usb_bus self; struct kref kref; const char *product_desc; int speed; char irq_descr[24U]; struct timer_list rh_timer; struct urb *status_urb; struct work_struct wakeup_work; const struct hc_driver *driver; struct usb_phy *phy; unsigned long flags; unsigned char rh_registered; unsigned char rh_pollable; unsigned char msix_enabled; unsigned char remove_phy; unsigned char uses_new_polling; unsigned char wireless; unsigned char authorized_default; unsigned char has_tt; unsigned char amd_resume_bug; unsigned int irq; void *regs; resource_size_t rsrc_start; resource_size_t rsrc_len; unsigned int power_budget; struct giveback_urb_bh high_prio_bh; struct giveback_urb_bh low_prio_bh; struct mutex *bandwidth_mutex; struct usb_hcd *shared_hcd; struct usb_hcd *primary_hcd; struct dma_pool *pool[4U]; int state; unsigned long hcd_priv[0U]; } ;
217 struct hc_driver { const char *description; const char *product_desc; size_t hcd_priv_size; irqreturn_t (*irq)(struct usb_hcd *); int flags; int (*reset)(struct usb_hcd *); int (*start)(struct usb_hcd *); int (*pci_suspend)(struct usb_hcd *, bool ); int (*pci_resume)(struct usb_hcd *, bool ); void (*stop)(struct usb_hcd *); void (*shutdown)(struct usb_hcd *); int (*get_frame_number)(struct usb_hcd *); int (*urb_enqueue)(struct usb_hcd *, struct urb *, gfp_t ); int (*urb_dequeue)(struct usb_hcd *, struct urb *, int); int (*map_urb_for_dma)(struct usb_hcd *, struct urb *, gfp_t ); void (*unmap_urb_for_dma)(struct usb_hcd *, struct urb *); void (*endpoint_disable)(struct usb_hcd *, struct usb_host_endpoint *); void (*endpoint_reset)(struct usb_hcd *, struct usb_host_endpoint *); int (*hub_status_data)(struct usb_hcd *, char *); int (*hub_control)(struct usb_hcd *, u16 , u16 , u16 , char *, u16 ); int (*bus_suspend)(struct usb_hcd *); int (*bus_resume)(struct usb_hcd *); int (*start_port_reset)(struct usb_hcd *, unsigned int); void (*relinquish_port)(struct usb_hcd *, int); int (*port_handed_over)(struct usb_hcd *, int); void (*clear_tt_buffer_complete)(struct usb_hcd *, struct usb_host_endpoint *); int (*alloc_dev)(struct usb_hcd *, struct usb_device *); void (*free_dev)(struct usb_hcd *, struct usb_device *); int (*alloc_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, unsigned int, gfp_t ); int (*free_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, gfp_t ); int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); int (*address_device)(struct usb_hcd *, struct usb_device *); int (*enable_device)(struct usb_hcd *, struct usb_device *); int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t ); int (*reset_device)(struct usb_hcd *, struct usb_device *); int (*update_device)(struct usb_hcd *, struct usb_device *); int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); int (*enable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state ); int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state ); int (*find_raw_port_number)(struct usb_hcd *, int); } ;
266 struct usb_tt { struct usb_device *hub; int multi; unsigned int think_time; void *hcpriv; spinlock_t lock; struct list_head clear_list; struct work_struct clear_work; } ;
98 struct iovec { void *iov_base; __kernel_size_t iov_len; } ;
66 struct uwb_mac_addr { u8 data[6U]; } ;
136 struct uwb_dev_addr { u8 data[2U]; } ;
192 enum uwb_drp_type { UWB_DRP_TYPE_ALIEN_BP = 0, UWB_DRP_TYPE_HARD = 1, UWB_DRP_TYPE_SOFT = 2, UWB_DRP_TYPE_PRIVATE = 3, UWB_DRP_TYPE_PCA = 4 } ;
219 struct uwb_drp_alloc { __le16 zone_bm; __le16 mas_bm; } ;
289 struct uwb_ie_hdr { u8 element_id; u8 length; } ;
300 struct uwb_ie_drp { struct uwb_ie_hdr hdr; __le16 drp_control; struct uwb_dev_addr dev_addr; struct uwb_drp_alloc allocs[]; } ;
394 struct uwb_ie_drp_avail { struct uwb_ie_hdr hdr; unsigned long bmp[4U]; } ;
468 struct uwb_rccb { u8 bCommandType; __le16 wCommand; u8 bCommandContext; } ;
482 struct uwb_rceb { u8 bEventType; __le16 wEvent; u8 bEventContext; } ;
612 struct uwb_rc_cmd_set_ie { struct uwb_rccb rccb; __le16 wIELength; u8 IEData[]; } ;
780 struct uwb_dev ;
781 struct uwb_beca_e ;
782 struct uwb_rc ;
783 struct uwb_rsv ;
784 struct uwb_dbg ;
785 struct uwb_dev { struct mutex mutex; struct list_head list_node; struct device dev; struct uwb_rc *rc; struct uwb_beca_e *bce; struct uwb_mac_addr mac_addr; struct uwb_dev_addr dev_addr; int beacon_slot; unsigned long streams[1U]; unsigned long last_availability_bm[4U]; } ;
77 struct uwb_notifs_chain { struct list_head list; struct mutex mutex; } ;
91 struct uwb_beca { struct list_head list; size_t entries; struct mutex mutex; } ;
98 struct uwbd { int pid; struct task_struct *task; wait_queue_head_t wq; struct list_head event_list; spinlock_t event_list_lock; } ;
107 struct uwb_mas_bm { unsigned long bm[4U]; unsigned long unsafe_bm[4U]; int safe; int unsafe; } ;
118 enum uwb_rsv_state { UWB_RSV_STATE_NONE = 0, UWB_RSV_STATE_O_INITIATED = 1, UWB_RSV_STATE_O_PENDING = 2, UWB_RSV_STATE_O_MODIFIED = 3, UWB_RSV_STATE_O_ESTABLISHED = 4, UWB_RSV_STATE_O_TO_BE_MOVED = 5, UWB_RSV_STATE_O_MOVE_EXPANDING = 6, UWB_RSV_STATE_O_MOVE_COMBINING = 7, UWB_RSV_STATE_O_MOVE_REDUCING = 8, UWB_RSV_STATE_T_ACCEPTED = 9, UWB_RSV_STATE_T_DENIED = 10, UWB_RSV_STATE_T_CONFLICT = 11, UWB_RSV_STATE_T_PENDING = 12, UWB_RSV_STATE_T_EXPANDING_ACCEPTED = 13, UWB_RSV_STATE_T_EXPANDING_CONFLICT = 14, UWB_RSV_STATE_T_EXPANDING_PENDING = 15, UWB_RSV_STATE_T_EXPANDING_DENIED = 16, UWB_RSV_STATE_T_RESIZED = 17, UWB_RSV_STATE_LAST = 18 } ;
140 enum uwb_rsv_target_type { UWB_RSV_TARGET_DEV = 0, UWB_RSV_TARGET_DEVADDR = 1 } ;
145 union __anonunion____missing_field_name_205 { struct uwb_dev *dev; struct uwb_dev_addr devaddr; } ;
145 struct uwb_rsv_target { enum uwb_rsv_target_type type; union __anonunion____missing_field_name_205 __annonCompField73; } ;
183 struct uwb_rsv_move { struct uwb_mas_bm final_mas; struct uwb_ie_drp *companion_drp_ie; struct uwb_mas_bm companion_mas; } ;
196 struct uwb_rsv { struct uwb_rc *rc; struct list_head rc_node; struct list_head pal_node; struct kref kref; struct uwb_dev *owner; struct uwb_rsv_target target; enum uwb_drp_type type; int max_mas; int min_mas; int max_interval; bool is_multicast; void (*callback)(struct uwb_rsv *); void *pal_priv; enum uwb_rsv_state state; bool needs_release_companion_mas; u8 stream; u8 tiebreaker; struct uwb_mas_bm mas; struct uwb_ie_drp *drp_ie; struct uwb_rsv_move mv; bool ie_valid; struct timer_list timer; struct work_struct handle_timeout_work; } ;
275 struct uwb_drp_avail { unsigned long global[4U]; unsigned long local[4U]; unsigned long pending[4U]; struct uwb_ie_drp_avail ie; bool ie_valid; } ;
306 struct uwb_drp_backoff_win { u8 window; u8 n; int total_expired; struct timer_list timer; bool can_reserve_extra_mases; } ;
330 struct uwb_rc { struct uwb_dev uwb_dev; int index; u16 version; struct module *owner; void *priv; int (*start)(struct uwb_rc *); void (*stop)(struct uwb_rc *); int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t ); int (*reset)(struct uwb_rc *); int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *); int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t , size_t *, size_t *); spinlock_t neh_lock; struct list_head neh_list; unsigned long ctx_bm[4U]; u8 ctx_roll; int beaconing; int beaconing_forced; int scanning; unsigned char scan_type; unsigned char ready; struct uwb_notifs_chain notifs_chain; struct uwb_beca uwb_beca; struct uwbd uwbd; struct uwb_drp_backoff_win bow; struct uwb_drp_avail drp_avail; struct list_head reservations; struct list_head cnflt_alien_list; struct uwb_mas_bm cnflt_alien_bitmap; struct mutex rsvs_mutex; spinlock_t rsvs_lock; struct workqueue_struct *rsv_workq; struct delayed_work rsv_update_work; struct delayed_work rsv_alien_bp_work; int set_drp_ie_pending; struct mutex ies_mutex; struct uwb_rc_cmd_set_ie *ies; size_t ies_capacity; struct list_head pals; int active_pals; struct uwb_dbg *dbg; } ;
422 struct uwb_pal { struct list_head node; const char *name; struct device *device; struct uwb_rc *rc; void (*channel_changed)(struct uwb_pal *, int); void (*new_rsv)(struct uwb_pal *, struct uwb_rsv *); int channel; struct dentry *debugfs_dir; } ;
830 struct wuie_hdr { u8 bLength; u8 bIEIdentifier; } ;
61 struct wusb_ckhdid { u8 data[16U]; } ;
84 struct wuie_host_info { struct wuie_hdr hdr; __le16 attributes; struct wusb_ckhdid CHID; } ;
98 struct __anonstruct_blk_206 { struct wusb_ckhdid CDID; u8 bDeviceAddress; u8 bReserved; } ;
98 struct wuie_connect_ack { struct wuie_hdr hdr; struct __anonstruct_blk_206 blk[4U]; } ;
136 struct wuie_keep_alive { struct wuie_hdr hdr; u8 bDeviceAddress[4U]; } ;
375 struct wusbhc ;
375 struct wusb_dev { struct kref refcnt; struct wusbhc *wusbhc; struct list_head cack_node; struct list_head rekey_node; u8 port_idx; u8 addr; unsigned char beacon_type; struct usb_encryption_descriptor ccm1_etd; struct wusb_ckhdid cdid; unsigned long entry_ts; struct usb_bos_descriptor *bos; struct usb_wireless_cap_descriptor *wusb_cap_descr; struct uwb_mas_bm availability; struct work_struct devconnect_acked_work; struct usb_device *usb_dev; } ;
134 struct wusb_port { u16 status; u16 change; struct wusb_dev *wusb_dev; u32 ptk_tkid; } ;
158 struct wusb_dev_info ;
158 struct __anonstruct_gtk_207 { struct usb_key_descriptor descr; u8 data[16U]; } ;
158 struct wusbhc { struct usb_hcd usb_hcd; struct device *dev; struct uwb_rc *uwb_rc; struct uwb_pal pal; unsigned int trust_timeout; struct wusb_ckhdid chid; uint8_t phy_rate; uint8_t dnts_num_slots; uint8_t dnts_interval; uint8_t retry_count; struct wuie_host_info *wuie_host_info; struct mutex mutex; u16 cluster_id; struct wusb_port *port; struct wusb_dev_info *dev_info; u8 ports_max; unsigned char active; struct wuie_keep_alive keep_alive_ie; struct delayed_work keep_alive_timer; struct list_head cack_list; size_t cack_count; struct wuie_connect_ack cack_ie; struct uwb_rsv *rsv; struct mutex mmcie_mutex; struct wuie_hdr **mmcie; u8 mmcies_max; int (*start)(struct wusbhc *); void (*stop)(struct wusbhc *, int); int (*mmcie_add)(struct wusbhc *, u8 , u8 , u8 , struct wuie_hdr *); int (*mmcie_rm)(struct wusbhc *, u8 ); int (*dev_info_set)(struct wusbhc *, struct wusb_dev *); int (*bwa_set)(struct wusbhc *, s8 , const struct uwb_mas_bm *); int (*set_ptk)(struct wusbhc *, u8 , u32 , const void *, size_t ); int (*set_gtk)(struct wusbhc *, u32 , const void *, size_t ); int (*set_num_dnts)(struct wusbhc *, u8 , u8 ); struct __anonstruct_gtk_207 gtk; u8 gtk_index; u32 gtk_tkid; struct work_struct gtk_rekey_work; struct usb_encryption_descriptor *ccm1_etd; } ;
114 struct whc_qtd { __le32 status; __le32 options; __le64 page_list_ptr; __u8 setup[8U]; } ;
55 struct whc_itd { __le16 presentation_time; __u8 num_segments; __u8 status; __le32 options; __le64 page_list_ptr; __le64 seg_list_ptr; } ;
87 struct whc_page_list_entry { __le64 buf_ptr; } ;
127 union __anonunion_overlay_208 { struct whc_qtd qtd; struct whc_itd itd; } ;
127 struct whc_qhead { __le64 link; __le32 info1; __le32 info2; __le32 info3; __le16 status; __le16 err_count; __le32 cur_window; __le32 scratch[3U]; union __anonunion_overlay_208 overlay; } ;
198 union __anonunion____missing_field_name_209 { struct whc_qtd qtd[8U]; struct whc_itd itd[8U]; } ;
198 struct whc ;
198 struct whc_qset { struct whc_qhead qh; union __anonunion____missing_field_name_209 __annonCompField74; dma_addr_t qset_dma; struct whc *whc; struct usb_host_endpoint *ep; struct list_head stds; int ntds; int td_start; int td_end; struct list_head list_node; unsigned char in_sw_list; unsigned char in_hw_list; unsigned char remove; unsigned char reset; struct urb *pause_after_urb; struct completion remove_complete; uint16_t max_packet; uint8_t max_burst; uint8_t max_seq; } ;
275 struct di_buf_entry { __le32 availability_info[8U]; __le32 addr_sec_info; __le32 reserved[7U]; } ;
286 struct dn_buf_entry { __u8 msg_size; __u8 reserved1; __u8 src_addr; __u8 status; __le32 tkid; __u8 dn_data[56U]; } ;
307 struct whc_dbg ;
308 struct whc { struct wusbhc wusbhc; struct umc_dev *umc; resource_size_t base_phys; void *base; int irq; u8 n_devices; u8 n_keys; u8 n_mmc_ies; u64 *pz_list; struct dn_buf_entry *dn_buf; struct di_buf_entry *di_buf; dma_addr_t pz_list_dma; dma_addr_t dn_buf_dma; dma_addr_t di_buf_dma; spinlock_t lock; struct mutex mutex; void *gen_cmd_buf; dma_addr_t gen_cmd_buf_dma; wait_queue_head_t cmd_wq; struct workqueue_struct *workqueue; struct work_struct dn_work; struct dma_pool *qset_pool; struct list_head async_list; struct list_head async_removed_list; wait_queue_head_t async_list_wq; struct work_struct async_work; struct list_head periodic_list[5U]; struct list_head periodic_removed_list; wait_queue_head_t periodic_list_wq; struct work_struct periodic_work; struct whc_dbg *dbg; } ;
77 struct whc_std { struct urb *urb; size_t len; int ntds_remaining; struct whc_qtd *qtd; struct list_head list_node; int num_pointers; dma_addr_t dma_addr; struct whc_page_list_entry *pl_virt; void *bounce_buf; struct scatterlist *bounce_sg; unsigned int bounce_offset; } ;
110 struct whc_urb { struct urb *urb; struct whc_qset *qset; struct work_struct dequeue_work; bool is_async; int status; } ;
135 enum whc_update { WHC_UPDATE_ADDED = 1, WHC_UPDATE_REMOVED = 2, WHC_UPDATE_UPDATED = 4 } ;
214 struct whc_dbg { struct dentry *di_f; struct dentry *asl_f; struct dentry *pzl_f; } ;
18 typedef short s16;
99 struct kernel_symbol { unsigned long value; const char *name; } ;
142 typedef void (*ctor_fn_t)();
527 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ;
195 struct static_key ;
59 struct static_key { atomic_t enabled; } ;
15 typedef __u64 Elf64_Addr;
16 typedef __u16 Elf64_Half;
20 typedef __u32 Elf64_Word;
21 typedef __u64 Elf64_Xword;
190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ;
198 typedef struct elf64_sym Elf64_Sym;
219 struct kernel_param ;
224 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ;
58 struct kparam_string ;
58 struct kparam_array ;
58 union __anonunion____missing_field_name_139___0 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ;
58 struct kernel_param { const char *name; const struct kernel_param_ops *ops; u16 perm; s16 level; union __anonunion____missing_field_name_139___0 __annonCompField35; } ;
70 struct kparam_string { unsigned int maxlen; char *string; } ;
76 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ;
463 struct tracepoint ;
464 struct tracepoint_func { void *func; void *data; } ;
29 struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ;
92 struct mod_arch_specific { } ;
36 struct module_param_attrs ;
36 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ;
46 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ;
72 struct exception_table_entry ;
208 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ;
215 struct module_ref { unsigned long incs; unsigned long decs; } ;
229 struct module_sect_attrs ;
229 struct module_notes_attrs ;
229 struct ftrace_event_call ;
229 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); void *module_init; void *module_core; unsigned int init_size; unsigned int core_size; unsigned int init_text_size; unsigned int core_text_size; unsigned int init_ro_size; unsigned int core_ro_size; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; Elf64_Sym *symtab; Elf64_Sym *core_symtab; unsigned int num_symtab; unsigned int core_num_syms; char *strtab; char *core_strtab; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct ftrace_event_call **trace_events; unsigned int num_trace_events; struct list_head source_list; struct list_head target_list; void (*exit)(); struct module_ref *refptr; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ;
14 struct pci_device_id { __u32 vendor; __u32 device; __u32 subvendor; __u32 subdevice; __u32 class; __u32 class_mask; kernel_ulong_t driver_data; } ;
72 struct umc_driver { char *name; u8 cap_id; int (*match)(struct umc_driver *, struct umc_dev *); const void *match_data; int (*probe)(struct umc_dev *); void (*remove)(struct umc_dev *); int (*suspend)(struct umc_dev *, pm_message_t ); int (*resume)(struct umc_dev *); int (*pre_reset)(struct umc_dev *); int (*post_reset)(struct umc_dev *); struct device_driver driver; } ;
62 struct exception_table_entry { int insn; int fixup; } ;
394 struct paravirt_callee_save { void *func; } ;
196 struct pv_irq_ops { struct paravirt_callee_save save_fl; struct paravirt_callee_save restore_fl; struct paravirt_callee_save irq_disable; struct paravirt_callee_save irq_enable; void (*safe_halt)(); void (*halt)(); void (*adjust_exception_frame)(); } ;
212 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ;
181 struct wusb_dn_hdr { u8 bType; u8 notifdata[]; } ;
855 struct usb_wireless_ep_comp_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bMaxBurst; __u8 bMaxSequence; __le16 wMaxStreamDelay; __le16 wOverTheAirPacketSize; __u8 bOverTheAirInterval; __u8 bmCompAttributes; } ;
38 typedef int Set;
1 long int __builtin_expect(long, long);
24 void INIT_LIST_HEAD(struct list_head *list);
47 void __list_add(struct list_head *, struct list_head *, struct list_head *);
60 void list_add(struct list_head *new, struct list_head *head);
111 void __list_del_entry(struct list_head *);
112 void list_del(struct list_head *);
153 void list_move(struct list_head *list, struct list_head *head);
186 int list_empty(const struct list_head *head);
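Aside: the list helpers above all operate on a struct list_head embedded in the containing object, as the driver does with its qset and std lists. A minimal usage sketch (the struct and function names here are hypothetical, not from the trace):

struct item {
        int value;
        struct list_head node;          /* linkage embedded in the object */
};

static LIST_HEAD(items);                /* a statically initialized list head */

static void item_track(struct item *it)
{
        INIT_LIST_HEAD(&it->node);
        list_add(&it->node, &items);    /* insert at the head of the list */
        if (!list_empty(&items))
                list_del(&it->node);    /* unlink the entry again */
}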
29 void _raw_spin_lock_irq(raw_spinlock_t *);
32 unsigned long int _raw_spin_lock_irqsave(raw_spinlock_t *);
41 void _raw_spin_unlock_irq(raw_spinlock_t *);
43 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);
290 raw_spinlock_t * spinlock_check(spinlock_t *lock);
326 void spin_lock_irq(spinlock_t *lock);
351 void spin_unlock_irq(spinlock_t *lock);
356 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
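The irqsave/irqrestore pair above is the form used when the same data is reached from both process context and the interrupt handler. A sketch of the canonical pattern (function name hypothetical):

static void touch_shared_state(spinlock_t *lock)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);      /* take the lock, disable local IRQs */
        /* ... update state also touched from the IRQ handler ... */
        spin_unlock_irqrestore(lock, flags); /* restore the saved IRQ state */
}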
822 long int prepare_to_wait_event(wait_queue_head_t *, wait_queue_t *, int);
823 void finish_wait(wait_queue_head_t *, wait_queue_t *);
138 void mutex_lock_nested(struct mutex *, unsigned int);
174 void mutex_unlock(struct mutex *);
303 unsigned long int msecs_to_jiffies(const unsigned int);
464 bool queue_work_on(int, struct workqueue_struct *, struct work_struct *);
504 bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
31 unsigned int ioread32(void *);
37 void iowrite32(u32 , void *);
372 long int schedule_timeout(long);
392 int usb_hcd_link_urb_to_ep(struct usb_hcd *, struct urb *);
393 int usb_hcd_check_unlink_urb(struct usb_hcd *, struct urb *, int);
395 void usb_hcd_unlink_urb_from_ep(struct usb_hcd *, struct urb *);
88 u32 le_readl(void *addr);
96 void le_writeq(u64 value, void *addr);
113 int whci_wait_for(struct device *, u32 *, u32 , u32 , unsigned long, const char *);
268 void whc_qset_set_link_ptr(u64 *ptr, u64 target);
147 void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val);
149 void whc_hw_error(struct whc *whc, const char *reason);
171 void asl_start(struct whc *whc);
172 void asl_stop(struct whc *whc);
173 int asl_init(struct whc *whc);
174 void asl_clean_up(struct whc *whc);
175 int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
176 int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
177 void asl_qset_delete(struct whc *whc, struct whc_qset *qset);
178 void scan_async_work(struct work_struct *work);
191 struct whc_qset * qset_alloc(struct whc *whc, gfp_t mem_flags);
192 void qset_free(struct whc *whc, struct whc_qset *qset);
193 struct whc_qset * get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags);
194 void qset_delete(struct whc *whc, struct whc_qset *qset);
195 void qset_clear(struct whc *whc, struct whc_qset *qset);
196 void qset_reset(struct whc *whc, struct whc_qset *qset);
197 int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, gfp_t mem_flags);
199 void qset_free_std(struct whc *whc, struct whc_std *std);
200 void qset_remove_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, int status);
202 void process_halted_qtd(struct whc *whc, struct whc_qset *qset, struct whc_qtd *qtd);
204 void process_inactive_qtd(struct whc *whc, struct whc_qset *qset, struct whc_qtd *qtd);
206 enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset);
207 void qset_remove_complete(struct whc *whc, struct whc_qset *qset);
209 void asl_update(struct whc *whc, uint32_t wusbcmd);
28 void qset_get_next_prev(struct whc *whc, struct whc_qset *qset, struct whc_qset **next, struct whc_qset **prev);
47 void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset);
53 void asl_qset_insert(struct whc *whc, struct whc_qset *qset);
66 void asl_qset_remove(struct whc *whc, struct whc_qset *qset);
97 uint32_t process_qset(struct whc *whc, struct whc_qset *qset);
33 extern struct module __this_module;
123 int bitmap_scnprintf(char *, unsigned int, const unsigned long *, int);
142 void kfree(const void *);
302 void * __kmalloc(size_t , gfp_t );
441 void * kmalloc(size_t size, gfp_t flags);
638 void * kzalloc(size_t size, gfp_t flags);
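kzalloc/kfree above are the allocation primitives used for the driver's bookkeeping structures such as struct whc_std. The standard checked-allocation pattern looks like this (function name hypothetical):

static struct whc_std *std_alloc_example(gfp_t mem_flags)
{
        struct whc_std *std;

        std = kzalloc(sizeof(*std), mem_flags); /* zero-initialized allocation */
        if (!std)
                return NULL;                    /* allocation can fail; must be checked */
        INIT_LIST_HEAD(&std->list_node);
        return std;
}

kfree(NULL) is a no-op, so cleanup paths may call kfree() unconditionally.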
99 ssize_t seq_read(struct file *, char *, size_t , loff_t *);
100 loff_t seq_lseek(struct file *, loff_t , int);
107 int seq_printf(struct seq_file *, const char *, ...);
140 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
142 int single_release(struct inode *, struct file *);
49 struct dentry * debugfs_create_file(const char *, umode_t , struct dentry *, void *, const struct file_operations *);
58 void debugfs_remove(struct dentry *);
212 void whc_dbg_init(struct whc *whc);
213 void whc_dbg_clean_up(struct whc *whc);
35 void qset_print(struct seq_file *s, struct whc_qset *qset);
87 int di_print(struct seq_file *s, void *p);
110 int asl_print(struct seq_file *s, void *p);
122 int pzl_print(struct seq_file *s, void *p);
137 int di_open(struct inode *inode, struct file *file);
142 int asl_open(struct inode *inode, struct file *file);
147 int pzl_open(struct inode *inode, struct file *file);
152 const struct file_operations di_fops = { &__this_module, &seq_lseek, &seq_read, 0, 0, 0, 0, 0, 0, 0, 0, &di_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
160 const struct file_operations asl_fops = { &__this_module, &seq_lseek, &seq_read, 0, 0, 0, 0, 0, 0, 0, 0, &asl_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
168 const struct file_operations pzl_fops = { &__this_module, &seq_lseek, &seq_read, 0, 0, 0, 0, 0, 0, 0, 0, &pzl_open, 0, &single_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
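The three file_operations tables above are positional initializers as rendered by the verifier; matched against the field order in struct file_operations (owner, llseek, read, ..., open, ..., release), each corresponds approximately to the usual designated-initializer form in the driver source:

static const struct file_operations di_fops = {
        .owner   = THIS_MODULE,         /* printed above as &__this_module */
        .llseek  = seq_lseek,
        .read    = seq_read,
        .open    = di_open,
        .release = single_release,
};

asl_fops and pzl_fops differ only in their .open callback (asl_open and pzl_open respectively).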
222 void ldv_check_final_state();
225 void ldv_check_return_value(int);
231 void ldv_initialize();
234 void ldv_handler_precall();
237 int nondet_int();
240 int LDV_IN_INTERRUPT = 0;
243 void ldv_main1_sequence_infinite_withcheck_stateful();
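ldv_initialize(), nondet_int(), ldv_check_final_state() and the ldv_main*_sequence_infinite_withcheck_stateful() entry points form the LDV verification harness that drives the driver's callbacks. Its generated body typically has this shape (a sketch; the real case labels and the set of callbacks invoked are tool-generated):

void ldv_main1_sequence_infinite_withcheck_stateful(void)
{
        ldv_initialize();                   /* set up the safety model's state */
        while (nondet_int()) {              /* nondeterministically chosen events */
                switch (nondet_int()) {
                case 0:
                        ldv_handler_precall();
                        /* invoke one registered driver callback here */
                        break;
                default:
                        break;
                }
        }
        ldv_check_final_state();            /* assert the model's invariants at exit */
}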
71 void set_bit(long nr, volatile unsigned long *addr);
91 int device_wakeup_enable(struct device *);
913 void * dev_get_drvdata(const struct device *);
1029 int dev_err(const struct device *, const char *, ...);
1031 int dev_warn(const struct device *, const char *, ...);
106 int __umc_driver_register(struct umc_driver *, struct module *, const char *);
117 void umc_driver_unregister(struct umc_driver *);
471 int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd);
484 int usb_endpoint_xfer_control(const struct usb_endpoint_descriptor *epd);
418 struct usb_hcd * usb_create_hcd(const struct hc_driver *, struct device *, const char *);
424 void usb_put_hcd(struct usb_hcd *);
426 int usb_add_hcd(struct usb_hcd *, unsigned int, unsigned long);
428 void usb_remove_hcd(struct usb_hcd *);
508 struct uwb_rc * uwb_rc_get_by_grandpa(const struct device *);
509 void uwb_rc_put(struct uwb_rc *);
306 int wusbhc_create(struct wusbhc *);
307 int wusbhc_b_create(struct wusbhc *);
308 void wusbhc_b_destroy(struct wusbhc *);
309 void wusbhc_destroy(struct wusbhc *);
404 int wusbhc_rh_status_data(struct usb_hcd *, char *);
405 int wusbhc_rh_control(struct usb_hcd *, u16 , u16 , u16 , char *, u16 );
406 int wusbhc_rh_start_port_reset(struct usb_hcd *, unsigned int);
441 u8 wusb_cluster_id_get();
442 void wusb_cluster_id_put(u8 );
80 void le_writel(u32 value, void *addr);
143 int whc_init(struct whc *whc);
144 void whc_clean_up(struct whc *whc);
152 int whc_wusbhc_start(struct wusbhc *wusbhc);
153 void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay);
154 int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, u8 handle, struct wuie_hdr *wuie);
156 int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle);
157 int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm);
158 int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev);
159 int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots);
160 int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, const void *ptk, size_t key_size);
162 int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid, const void *gtk, size_t key_size);
164 int whc_set_cluster_id(struct whc *whc, u8 bcid);
167 irqreturn_t whc_int_handler(struct usb_hcd *hcd);
185 int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
186 int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
187 void pzl_qset_delete(struct whc *whc, struct whc_qset *qset);
33 int whc_reset(struct usb_hcd *usb_hcd);
45 int whc_start(struct usb_hcd *usb_hcd);
89 void whc_stop(struct usb_hcd *usb_hcd);
108 int whc_get_frame_number(struct usb_hcd *usb_hcd);
118 int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, gfp_t mem_flags);
146 int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status);
173 void whc_endpoint_disable(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep);
191 void whc_endpoint_reset(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep);
217 struct hc_driver whc_hc_driver = { "whci-hcd", "Wireless host controller", 2024UL, &whc_int_handler, 32, &whc_reset, &whc_start, 0, 0, &whc_stop, 0, &whc_get_frame_number, &whc_urb_enqueue, &whc_urb_dequeue, 0, 0, &whc_endpoint_disable, &whc_endpoint_reset, &wusbhc_rh_status_data, &wusbhc_rh_control, 0, 0, &wusbhc_rh_start_port_reset, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
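whc_hc_driver is again a positional rendering; read against the field order of struct hc_driver above, it reconstructs to roughly the following designated-initializer form (2024UL is presumably the computed hcd_priv_size for this configuration, and 32 is the numeric value of the HCD_USB2 flag):

static struct hc_driver whc_hc_driver = {
        .description      = "whci-hcd",
        .product_desc     = "Wireless host controller",
        .hcd_priv_size    = 2024UL,
        .irq              = whc_int_handler,
        .flags            = 32,            /* HCD_USB2 */
        .reset            = whc_reset,
        .start            = whc_start,
        .stop             = whc_stop,
        .get_frame_number = whc_get_frame_number,
        .urb_enqueue      = whc_urb_enqueue,
        .urb_dequeue      = whc_urb_dequeue,
        .endpoint_disable = whc_endpoint_disable,
        .endpoint_reset   = whc_endpoint_reset,
        .hub_status_data  = wusbhc_rh_status_data,
        .hub_control      = wusbhc_rh_control,
        .start_port_reset = wusbhc_rh_start_port_reset,
};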
238 int whc_probe(struct umc_dev *umc);
323 void whc_remove(struct umc_dev *umc);
340 struct umc_driver whci_hc_driver = { (char *)"whci-hcd", 1U, 0, 0, &whc_probe, &whc_remove, 0, 0, 0, 0, { 0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } };
347 int whci_hc_driver_init();
353 void whci_hc_driver_exit();
364 const struct pci_device_id __mod_pci_device_table = { };
392 void ldv_check_return_value_probe(int);
407 void ldv_main2_sequence_infinite_withcheck_stateful();
1 void * __builtin_memcpy(void *, const void *, unsigned long);
319 void wusbhc_reset_all(struct wusbhc *);
148 int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len);
358 extern struct pv_irq_ops pv_irq_ops;
53 int __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...);
71 void warn_slowpath_null(const char *, const int);
55 void * memset(void *, int, size_t );
802 unsigned long int arch_local_save_flags();
155 int arch_irqs_disabled_flags(unsigned long flags);
279 void lockdep_init_map(struct lockdep_map *, const char *, struct lock_class_key *, int);
93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);
68 void __init_waitqueue_head(wait_queue_head_t *, const char *, struct lock_class_key *);
119 void __mutex_init(struct mutex *, const char *, struct lock_class_key *);
192 void __init_work(struct work_struct *, int);
397 struct workqueue_struct * __alloc_workqueue_key(const char *, unsigned int, int, struct lock_class_key *, const char *, ...);
457 void destroy_workqueue(struct workqueue_struct *);
139 extern struct resource iomem_resource;
164 resource_size_t resource_size(const struct resource *res);
182 struct resource * __request_region(struct resource *, resource_size_t , resource_size_t , const char *, int);
193 void __release_region(struct resource *, resource_size_t , resource_size_t );
174 void * ioremap_nocache(resource_size_t , unsigned long);
182 void * ioremap(resource_size_t offset, unsigned long size);
187 void iounmap(volatile void *);
84 const char * kobject_name(const struct kobject *kobj);
798 const char * dev_name(const struct device *dev);
70 int is_device_dma_capable(struct device *dev);
53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *);
56 void debug_dma_free_coherent(struct device *, size_t , void *, dma_addr_t );
27 extern struct device x86_dma_fallback_dev;
30 extern struct dma_map_ops *dma_ops;
32 struct dma_map_ops * get_dma_ops(struct device *dev);
107 unsigned long int dma_alloc_coherent_mask(struct device *dev, gfp_t gfp);
119 gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp);
135 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs);
164 void dma_free_attrs(struct device *dev, size_t size, void *vaddr, dma_addr_t bus, struct dma_attrs *attrs);
17 struct dma_pool * dma_pool_create(const char *, struct device *, size_t , size_t , size_t );
20 void dma_pool_destroy(struct dma_pool *);
168 void whc_dn_work(struct work_struct *work);
181 int pzl_init(struct whc *whc);
182 void pzl_clean_up(struct whc *whc);
188 void scan_periodic_work(struct work_struct *work);
30 void whc_hw_reset(struct whc *whc);
37 void whc_hw_init_di_buf(struct whc *whc);
48 void whc_hw_init_dn_buf(struct whc *whc);
144 void __wake_up(wait_queue_head_t *, unsigned int, int, void *);
392 void wusbhc_handle_dn(struct wusbhc *, u8 , struct wusb_dn_hdr *, size_t );
25 void transfer_done(struct whc *whc);
67 int process_dn_buf(struct whc *whc);
183 void pzl_start(struct whc *whc);
184 void pzl_stop(struct whc *whc);
208 void pzl_update(struct whc *whc, uint32_t wusbcmd);
28 void update_pzl_pointers(struct whc *whc, int period, u64 addr);
65 int qset_get_period(struct whc *whc, struct whc_qset *qset);
76 void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset);
87 void pzl_qset_remove(struct whc *whc, struct whc_qset *qset);
103 enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset);
207 void update_pzl_hw_view(struct whc *whc);
7 dma_addr_t ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
439 int fls(int x);
34 void * __memcpy(void *, const void *, size_t );
74 void list_add_tail(struct list_head *new, struct list_head *head);
142 void list_del_init(struct list_head *entry);
22 void _raw_spin_lock(raw_spinlock_t *);
39 void _raw_spin_unlock(raw_spinlock_t *);
301 void spin_lock(spinlock_t *lock);
341 void spin_unlock(spinlock_t *lock);
73 void init_completion(struct completion *x);
91 void wait_for_completion(struct completion *);
106 void complete(struct completion *);
888 void * lowmem_page_address(const struct page *page);
95 struct page * sg_page(struct scatterlist *sg);
218 void * sg_virt(struct scatterlist *sg);
224 struct scatterlist * sg_next(struct scatterlist *);
63 int valid_dma_direction(int dma_direction);
44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool );
36 dma_addr_t ldv_dma_map_single_attrs_33(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
45 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
141 void * krealloc(const void *, size_t , gfp_t );
22 void * dma_pool_alloc(struct dma_pool *, gfp_t , dma_addr_t *);
25 void dma_pool_free(struct dma_pool *, void *, dma_addr_t );
317 void wusbhc_giveback_urb(struct wusbhc *, struct urb *, int);
465 u8 wusb_port_no_to_idx(u8 port_no);
188 unsigned int usb_pipe_to_qh_type(unsigned int pipe);
131 bool whc_std_last(struct whc_std *std);
53 void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb);
255 void qset_remove_qtd(struct whc *whc, struct whc_qset *qset);
264 void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std);
331 void qset_remove_qtds(struct whc *whc, struct whc_qset *qset, struct urb *urb);
348 void qset_free_stds(struct whc_qset *qset, struct urb *urb);
358 int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags);
392 void urb_dequeue_work(struct work_struct *work);
413 struct whc_std * qset_new_std(struct whc *whc, struct whc_qset *qset, struct urb *urb, gfp_t mem_flags);
431 int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb, gfp_t mem_flags);
550 int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset, struct urb *urb, gfp_t mem_flags);
723 int get_urb_status_from_qtd(struct urb *urb, u32 status);
146 void bitmap_copy_le(void *, const unsigned long *, int);
46 void msleep(unsigned int);
271 void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas);
25 int whc_update_di(struct whc *whc, int idx);
141 int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid, const void *key, size_t key_size, bool is_gtk);
10 void ldv_error();
25 int ldv_undef_int();
26 void * ldv_undef_ptr();
8 int LDV_DMA_MAP_CALLS = 0;
11 dma_addr_t ldv_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir);
25 int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
41 dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir);
return ;
}
-entry_point
{
409 struct usb_hcd *var_group1;
410 struct urb *var_group2;
411 unsigned int var_whc_urb_enqueue_4_p2;
412 int var_whc_urb_dequeue_5_p2;
413 struct usb_host_endpoint *var_group3;
414 struct umc_dev *var_group4;
415 int res_whc_probe_8;
416 int ldv_s_whci_hc_driver_umc_driver;
417 int tmp;
418 int tmp___0;
419 int tmp___1;
477 ldv_s_whci_hc_driver_umc_driver = 0;
457 LDV_IN_INTERRUPT = 1;
466 -ldv_initialize()
{
71 LDV_DMA_MAP_CALLS = 0;
72 return ;;
}
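Note: ldv_initialize() resets the model state. LDV_DMA_MAP_CALLS counts DMA mappings created by the driver whose handles have not yet been checked; the increment and the error check appear verbatim later in this trace. A minimal sketch of the counting model being exercised follows (the reset inside ldv_dma_mapping_error() is an assumption about the rule under check, not a verbatim quote from the model):

int LDV_DMA_MAP_CALLS = 0;

dma_addr_t ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size,
                                    enum dma_data_direction dir, struct dma_attrs *attrs)
{
        dma_addr_t handle = (dma_addr_t)ldv_undef_ptr(); /* nondeterministic handle */
        if (LDV_DMA_MAP_CALLS != 0)
                ldv_error(); /* a second mapping before the first was checked */
        LDV_DMA_MAP_CALLS = LDV_DMA_MAP_CALLS + 1;
        return handle;
}

int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        LDV_DMA_MAP_CALLS = LDV_DMA_MAP_CALLS - 1; /* assumed: checking the handle discharges the obligation */
        return ldv_undef_int();
}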
472 ldv_handler_precall() { /* Function call is skipped because the function is undefined */}
473 -whci_hc_driver_init()
{
349 int tmp;
349 tmp = __umc_driver_register(&whci_hc_driver, &__this_module, "whci_hcd") { /* Function call is skipped because the function is undefined */}
349 return tmp;;
}
473 assume(!(tmp != 0));
480 goto ldv_36495;
480 tmp___1 = nondet_int() { /* Function call is skipped because the function is undefined */}
480 assume(tmp___1 != 0);
483 goto ldv_36494;
481 ldv_36494:;
484 tmp___0 = nondet_int() { /* Function call is skipped because the function is undefined */}
484 switch (tmp___0)
485 assume(!(tmp___0 == 0));
503 assume(!(tmp___0 == 1));
520 assume(!(tmp___0 == 2));
537 assume(!(tmp___0 == 3));
554 assume(tmp___0 == 4);
562 ldv_handler_precall() { /* Function call is skipped because the function is undefined */}
563 -whc_urb_enqueue(var_group1, var_group2, var_whc_urb_enqueue_4_p2)
{
120 struct wusbhc *wusbhc;
121 const struct usb_hcd *__mptr;
122 struct whc *whc;
123 const struct wusbhc *__mptr___0;
124 int ret;
121 __mptr = (const struct usb_hcd *)usb_hcd;
121 wusbhc = (struct wusbhc *)__mptr;
122 __mptr___0 = (const struct wusbhc *)wusbhc;
122 whc = (struct whc *)__mptr___0;
125 switch ((urb->pipe) >> 30)
126 assume(!(((urb->pipe) >> 30) == 1U));
129 assume(!(((urb->pipe) >> 30) == 0U));
133 skipped unnecessary edges
136 -asl_urb_enqueue(whc, urb, mem_flags)
{
267 struct whc_qset *qset;
268 int err;
269 unsigned long flags;
270 raw_spinlock_t *tmp;
271 -spinlock_check(&(whc->lock))
{
292 return &(lock->__annonCompField20.rlock);;
}
271 flags = _raw_spin_lock_irqsave(tmp) { /* Function call is skipped because the function is undefined */}
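Note: the two steps above are the inlined expansion of spin_lock_irqsave(&whc->lock, flags) from asl_urb_enqueue() (line 271 of asl.c in the source listing below): spinlock_check() simply returns the raw spinlock embedded in the spinlock_t, and _raw_spin_lock_irqsave() disables local interrupts and acquires it. The driver-level equivalent is:

        spin_lock_irqsave(&whc->lock, flags);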
273 err = usb_hcd_link_urb_to_ep(&(whc->wusbhc.usb_hcd), urb) { /* Function call is skipped because the function is undefined */}
274 assume(!(err < 0));
279 -get_qset(whc, urb, 32U)
{
160 struct whc_qset *qset;
163 struct whc_qset *__CPAchecker_TMP_0 = (struct whc_qset *)(urb->ep->hcpriv);
163 qset = __CPAchecker_TMP_0;
164 assume(!(((unsigned long)qset) == ((unsigned long)((struct whc_qset *)0))));
173 return qset;;
}
280 assume(!(((unsigned long)qset) == ((unsigned long)((struct whc_qset *)0))));
283 -qset_add_urb(whc, qset, urb, 32U)
{
637 struct whc_urb *wurb;
638 int remaining;
639 unsigned long long transfer_dma;
640 int ntds_remaining;
641 int ret;
642 void *tmp;
643 struct lock_class_key __key;
644 struct __anonstruct_atomic64_t_7 __constr_expr_0;
645 struct whc_std *std;
646 unsigned long std_len;
647 int tmp___0;
639 int __CPAchecker_TMP_0 = (int)(urb->transfer_buffer_length);
639 remaining = __CPAchecker_TMP_0;
640 transfer_dma = urb->transfer_dma;
644 -kzalloc(104UL, mem_flags)
{
640 void *tmp;
640 -kmalloc(size, flags | 32768U)
{
443 void *tmp___2;
458 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped because the function is undefined */}
458 return tmp___2;;
}
640 return tmp;;
}
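Note: in the inlined kzalloc() above, 32768U is 0x8000, the value of __GFP_ZERO in this kernel, so the call amounts to kmalloc(size, flags | __GFP_ZERO): a zeroed allocation, here 104 bytes for struct whc_urb. The driver-level form, reconstructed from the trace (the actual call site in qset_add_urb() lies past the excerpt below), would be:

        wurb = kzalloc(sizeof(struct whc_urb), mem_flags);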
644 wurb = (struct whc_urb *)tmp;
645 assume(!(((unsigned long)wurb) == ((unsigned long)((struct whc_urb *)0))));
647 urb->hcpriv = (void *)wurb;
648 wurb->qset = qset;
649 wurb->urb = urb;
650 __init_work(&(wurb->dequeue_work), 0) { /* Function call is skipped because the function is undefined */}
650 __constr_expr_0.counter = 137438953408L;
650 wurb->dequeue_work.data = __constr_expr_0;
650 lockdep_init_map(&(wurb->dequeue_work.lockdep_map), "(&wurb->dequeue_work)", &__key, 0) { /* Function call is skipped because the function is undefined */}
650 -INIT_LIST_HEAD(&(wurb->dequeue_work.entry))
{
26 list->next = list;
27 list->prev = list;
28 return ;;
}
650 wurb->dequeue_work.func = &urb_dequeue_work;
652 assume(!((urb->num_sgs) != 0));
663 ntds_remaining = (remaining + 1048574) / 1048575;
664 assume(!(ntds_remaining == 0));
667 goto ldv_35816;
667 assume(ntds_remaining != 0);
669 goto ldv_35815;
668 ldv_35815:;
671 std_len = (size_t )remaining;
672 assume(std_len > 1048575UL);
673 std_len = 1048575UL;
675 -qset_new_std(whc, qset, urb, mem_flags)
{
416 struct whc_std *std;
417 void *tmp;
418 -kzalloc(96UL, mem_flags)
{
640 void *tmp;
640 -kmalloc(size, flags | 32768U)
{
443 void *tmp___2;
458 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped because the function is undefined */}
458 return tmp___2;;
}
640 return tmp;;
}
418 std = (struct whc_std *)tmp;
419 assume(!(((unsigned long)std) == ((unsigned long)((struct whc_std *)0))));
422 std->urb = urb;
423 std->qtd = (struct whc_qtd *)0;
425 -INIT_LIST_HEAD(&(std->list_node))
{
26 list->next = list;
27 list->prev = list;
28 return ;;
}
426 -list_add_tail(&(std->list_node), &(qset->stds))
{
76 __list_add(new, head->prev, head) { /* Function call is skipped because the function is undefined */}
77 return ;;
}
428 return std;;
}
676 assume(!(((unsigned long)std) == ((unsigned long)((struct whc_std *)0))));
679 std->dma_addr = transfer_dma;
680 std->len = std_len;
681 std->ntds_remaining = ntds_remaining;
683 -qset_fill_page_list(whc, std, mem_flags)
{
360 unsigned long long dma_addr;
361 unsigned long long sp;
362 unsigned long long ep;
363 unsigned long pl_len;
364 int p;
365 void *tmp;
360 dma_addr = std->dma_addr;
366 assume(!((std->len) <= 4096UL));
371 sp = dma_addr & 18446744073709547520ULL;
372 unsigned long long __CPAchecker_TMP_0 = (unsigned long long)(std->len);
372 ep = __CPAchecker_TMP_0 + dma_addr;
373 std->num_pointers = (int)(((ep - sp) + 4095ULL) / 4096ULL);
375 unsigned long __CPAchecker_TMP_1 = (unsigned long)(std->num_pointers);
375 pl_len = __CPAchecker_TMP_1 * 8UL;
376 -kmalloc(pl_len, mem_flags)
{
443 void *tmp___2;
458 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped because the function is undefined */}
458 return tmp___2;;
}
376 std->pl_virt = (struct whc_page_list_entry *)tmp;
377 unsigned long __CPAchecker_TMP_2 = (unsigned long)(std->pl_virt);
377 assume(!(__CPAchecker_TMP_2 == ((unsigned long)((struct whc_page_list_entry *)0))));
379 void *__CPAchecker_TMP_3 = (void *)(std->pl_virt);
379 -ldv_dma_map_single_attrs_33(whc->wusbhc.dev, __CPAchecker_TMP_3, pl_len, 1, (struct dma_attrs *)0)
{
38 unsigned long long tmp;
37 -ldv_dma_map_single_attrs(dev, ptr, size, dir, attrs)
{
58 unsigned long long nonedetermined;
59 void *tmp;
58 tmp = ldv_undef_ptr() { /* Function call is skipped because the function is undefined */}
58 nonedetermined = (dma_addr_t )tmp;
61 assume(!(LDV_DMA_MAP_CALLS != 0));
63 LDV_DMA_MAP_CALLS = LDV_DMA_MAP_CALLS + 1;
65 return nonedetermined;;
}
37 return tmp;;
}
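Note: the model now records one outstanding, unchecked mapping. The returned handle is stored in std->dma_addr (qset.c line 379 in the source listing below), but no dma_mapping_error() call follows, so the obligation is still pending when the sTD loop runs again.

        /* model state after this call: LDV_DMA_MAP_CALLS == 1 */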
381 p = 0;
381 goto ldv_35698;
381 assume(!((std->num_pointers) > p));
386 return 0;;
}
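Note: two constants in the trace above are worth decoding. In qset_add_urb(), ntds_remaining = (remaining + 1048574) / 1048575 is round-up division by 1048575 (0xFFFFF), the per-qTD transfer limit; for example, a 4 MiB URB (4194304 bytes) yields five sTDs: four of 1048575 bytes plus one of 4 bytes. In qset_fill_page_list(), the mask 18446744073709547520ULL is ~0xFFF (round down to a 4 KiB page boundary), and each page-list entry is 8 bytes. The same arithmetic in source terms (compare qset.c lines 371-375 below):

        sp = dma_addr & ~(dma_addr_t)0xFFF;                /* page-align down */
        ep = dma_addr + std->len;
        std->num_pointers = DIV_ROUND_UP(ep - sp, 4096);   /* 4 KiB pages */
        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); /* 8 bytes each */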
683 assume(!(tmp___0 < 0));
686 ntds_remaining = ntds_remaining - 1;
687 remaining = (int)(((unsigned int)remaining) - ((unsigned int)std_len));
688 transfer_dma = transfer_dma + ((unsigned long long)std_len);
689 ldv_35816:;
667 assume(ntds_remaining != 0);
669 goto ldv_35815;
668 ldv_35815:;
671 std_len = (size_t )remaining;
672 assume(std_len > 1048575UL);
673 std_len = 1048575UL;
675 -qset_new_std(whc, qset, urb, mem_flags)
{
416 struct whc_std *std;
417 void *tmp;
418 -kzalloc(96UL, mem_flags)
{
640 void *tmp;
640 -kmalloc(size, flags | 32768U)
{
443 void *tmp___2;
458 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped because the function is undefined */}
458 return tmp___2;;
}
640 return tmp;;
}
418 std = (struct whc_std *)tmp;
419 assume(!(((unsigned long)std) == ((unsigned long)((struct whc_std *)0))));
422 std->urb = urb;
423 std->qtd = (struct whc_qtd *)0;
425 -INIT_LIST_HEAD(&(std->list_node))
{
26 list->next = list;
27 list->prev = list;
28 return ;;
}
426 -list_add_tail(&(std->list_node), &(qset->stds))
{
76 __list_add(new, head->prev, head) { /* Function call is skipped because the function is undefined */}
77 return ;;
}
428 return std;;
}
676 assume(!(((unsigned long)std) == ((unsigned long)((struct whc_std *)0))));
679 std->dma_addr = transfer_dma;
680 std->len = std_len;
681 std->ntds_remaining = ntds_remaining;
683 -qset_fill_page_list(whc, std, mem_flags)
{
360 unsigned long long dma_addr;
361 unsigned long long sp;
362 unsigned long long ep;
363 unsigned long pl_len;
364 int p;
365 void *tmp;
360 dma_addr = std->dma_addr;
366 assume(!((std->len) <= 4096UL));
371 sp = dma_addr & 18446744073709547520ULL;
372 unsigned long long __CPAchecker_TMP_0 = (unsigned long long)(std->len);
372 ep = __CPAchecker_TMP_0 + dma_addr;
373 std->num_pointers = (int)(((ep - sp) + 4095ULL) / 4096ULL);
375 unsigned long __CPAchecker_TMP_1 = (unsigned long)(std->num_pointers);
375 pl_len = __CPAchecker_TMP_1 * 8UL;
376 -kmalloc(pl_len, mem_flags)
{
443 void *tmp___2;
458 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}
458 return tmp___2;;
}
376 std->pl_virt = (struct whc_page_list_entry *)tmp;
377 unsigned long __CPAchecker_TMP_2 = (unsigned long)(std->pl_virt);
377 assume(!(__CPAchecker_TMP_2 == ((unsigned long)((struct whc_page_list_entry *)0))));
379 void *__CPAchecker_TMP_3 = (void *)(std->pl_virt);
379 -ldv_dma_map_single_attrs_33(whc->wusbhc.dev, __CPAchecker_TMP_3, pl_len, 1, (struct dma_attrs *)0)
{
38 unsigned long long tmp;
37 -ldv_dma_map_single_attrs(dev, ptr, size, dir, attrs)
{
58 unsigned long long nonedetermined;
59 void *tmp;
58 tmp = ldv_undef_ptr() { /* Function call is skipped because the function is undefined */}
58 nonedetermined = (dma_addr_t )tmp;
61 assume(LDV_DMA_MAP_CALLS != 0);
61 -ldv_error()
{
15 LDV_ERROR:;
}
}
}
}
}
}
}
}
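Note: summary of the violation. qset_add_urb() splits the URB into sTDs and calls qset_fill_page_list() once per sTD. Each call maps the freshly allocated page list with dma_map_single() (qset.c line 379 below, reached here via ldv_dma_map_single_attrs_33()), and the driver never checks the returned handle with dma_mapping_error(). On the first iteration LDV_DMA_MAP_CALLS goes from 0 to 1; on the second iteration the model finds the counter still nonzero, the assume() at 61 succeeds, and ldv_error() is reached. One plausible repair, sketched here as an assumption rather than taken from the verified source, is to check the handle immediately after mapping:

        std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
                                       pl_len, DMA_TO_DEVICE);
        if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
                kfree(std->pl_virt);
                std->pl_virt = NULL;
                return -EFAULT;
        }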
Source code
1 /* 2 * Wireless Host Controller (WHC) asynchronous schedule management. 3 * 4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd. 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License version 8 * 2 as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 */ 18 #include <linux/kernel.h> 19 #include <linux/gfp.h> 20 #include <linux/dma-mapping.h> 21 #include <linux/uwb/umc.h> 22 #include <linux/usb.h> 23 24 #include "../../wusbcore/wusbhc.h" 25 26 #include "whcd.h" 27 28 static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset, 29 struct whc_qset **next, struct whc_qset **prev) 30 { 31 struct list_head *n, *p; 32 33 BUG_ON(list_empty(&whc->async_list)); 34 35 n = qset->list_node.next; 36 if (n == &whc->async_list) 37 n = n->next; 38 p = qset->list_node.prev; 39 if (p == &whc->async_list) 40 p = p->prev; 41 42 *next = container_of(n, struct whc_qset, list_node); 43 *prev = container_of(p, struct whc_qset, list_node); 44 45 } 46 47 static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset) 48 { 49 list_move(&qset->list_node, &whc->async_list); 50 qset->in_sw_list = true; 51 } 52 53 static void asl_qset_insert(struct whc *whc, struct whc_qset *qset) 54 { 55 struct whc_qset *next, *prev; 56 57 qset_clear(whc, qset); 58 59 /* Link into ASL. */ 60 qset_get_next_prev(whc, qset, &next, &prev); 61 whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma); 62 whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma); 63 qset->in_hw_list = true; 64 } 65 66 static void asl_qset_remove(struct whc *whc, struct whc_qset *qset) 67 { 68 struct whc_qset *prev, *next; 69 70 qset_get_next_prev(whc, qset, &next, &prev); 71 72 list_move(&qset->list_node, &whc->async_removed_list); 73 qset->in_sw_list = false; 74 75 /* 76 * No more qsets in the ASL? The caller must stop the ASL as 77 * it's no longer valid. 78 */ 79 if (list_empty(&whc->async_list)) 80 return; 81 82 /* Remove from ASL. */ 83 whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma); 84 qset->in_hw_list = false; 85 } 86 87 /** 88 * process_qset - process any recently inactivated or halted qTDs in a 89 * qset. 90 * 91 * After inactive qTDs are removed, new qTDs can be added if the 92 * urb queue still contains URBs. 93 * 94 * Returns any additional WUSBCMD bits for the ASL sync command (i.e., 95 * WUSBCMD_ASYNC_QSET_RM if a halted qset was removed). 96 */ 97 static uint32_t process_qset(struct whc *whc, struct whc_qset *qset) 98 { 99 enum whc_update update = 0; 100 uint32_t status = 0; 101 102 while (qset->ntds) { 103 struct whc_qtd *td; 104 int t; 105 106 t = qset->td_start; 107 td = &qset->qtd[qset->td_start]; 108 status = le32_to_cpu(td->status); 109 110 /* 111 * Nothing to do with a still active qTD. 112 */ 113 if (status & QTD_STS_ACTIVE) 114 break; 115 116 if (status & QTD_STS_HALTED) { 117 /* Ug, an error. */ 118 process_halted_qtd(whc, qset, td); 119 /* A halted qTD always triggers an update 120 because the qset was either removed or 121 reactivated. */ 122 update |= WHC_UPDATE_UPDATED; 123 goto done; 124 } 125 126 /* Mmm, a completed qTD. 
*/ 127 process_inactive_qtd(whc, qset, td); 128 } 129 130 if (!qset->remove) 131 update |= qset_add_qtds(whc, qset); 132 133 done: 134 /* 135 * Remove this qset from the ASL if requested, but only if has 136 * no qTDs. 137 */ 138 if (qset->remove && qset->ntds == 0) { 139 asl_qset_remove(whc, qset); 140 update |= WHC_UPDATE_REMOVED; 141 } 142 return update; 143 } 144 145 void asl_start(struct whc *whc) 146 { 147 struct whc_qset *qset; 148 149 qset = list_first_entry(&whc->async_list, struct whc_qset, list_node); 150 151 le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR); 152 153 whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN); 154 whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, 155 WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED, 156 1000, "start ASL"); 157 } 158 159 void asl_stop(struct whc *whc) 160 { 161 whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0); 162 whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, 163 WUSBSTS_ASYNC_SCHED, 0, 164 1000, "stop ASL"); 165 } 166 167 /** 168 * asl_update - request an ASL update and wait for the hardware to be synced 169 * @whc: the WHCI HC 170 * @wusbcmd: WUSBCMD value to start the update. 171 * 172 * If the WUSB HC is inactive (i.e., the ASL is stopped) then the 173 * update must be skipped as the hardware may not respond to update 174 * requests. 175 */ 176 void asl_update(struct whc *whc, uint32_t wusbcmd) 177 { 178 struct wusbhc *wusbhc = &whc->wusbhc; 179 long t; 180 181 mutex_lock(&wusbhc->mutex); 182 if (wusbhc->active) { 183 whc_write_wusbcmd(whc, wusbcmd, wusbcmd); 184 t = wait_event_timeout( 185 whc->async_list_wq, 186 (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0, 187 msecs_to_jiffies(1000)); 188 if (t == 0) 189 whc_hw_error(whc, "ASL update timeout"); 190 } 191 mutex_unlock(&wusbhc->mutex); 192 } 193 194 /** 195 * scan_async_work - scan the ASL for qsets to process. 196 * 197 * Process each qset in the ASL in turn and then signal the WHC that 198 * the ASL has been updated. 199 * 200 * Then start, stop or update the asynchronous schedule as required. 201 */ 202 void scan_async_work(struct work_struct *work) 203 { 204 struct whc *whc = container_of(work, struct whc, async_work); 205 struct whc_qset *qset, *t; 206 enum whc_update update = 0; 207 208 spin_lock_irq(&whc->lock); 209 210 /* 211 * Transerve the software list backwards so new qsets can be 212 * safely inserted into the ASL without making it non-circular. 213 */ 214 list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) { 215 if (!qset->in_hw_list) { 216 asl_qset_insert(whc, qset); 217 update |= WHC_UPDATE_ADDED; 218 } 219 220 update |= process_qset(whc, qset); 221 } 222 223 spin_unlock_irq(&whc->lock); 224 225 if (update) { 226 uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB; 227 if (update & WHC_UPDATE_REMOVED) 228 wusbcmd |= WUSBCMD_ASYNC_QSET_RM; 229 asl_update(whc, wusbcmd); 230 } 231 232 /* 233 * Now that the ASL is updated, complete the removal of any 234 * removed qsets. 235 * 236 * If the qset was to be reset, do so and reinsert it into the 237 * ASL if it has pending transfers. 
238 */ 239 spin_lock_irq(&whc->lock); 240 241 list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) { 242 qset_remove_complete(whc, qset); 243 if (qset->reset) { 244 qset_reset(whc, qset); 245 if (!list_empty(&qset->stds)) { 246 asl_qset_insert_begin(whc, qset); 247 queue_work(whc->workqueue, &whc->async_work); 248 } 249 } 250 } 251 252 spin_unlock_irq(&whc->lock); 253 } 254 255 /** 256 * asl_urb_enqueue - queue an URB onto the asynchronous list (ASL). 257 * @whc: the WHCI host controller 258 * @urb: the URB to enqueue 259 * @mem_flags: flags for any memory allocations 260 * 261 * The qset for the endpoint is obtained and the urb queued on to it. 262 * 263 * Work is scheduled to update the hardware's view of the ASL. 264 */ 265 int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags) 266 { 267 struct whc_qset *qset; 268 int err; 269 unsigned long flags; 270 271 spin_lock_irqsave(&whc->lock, flags); 272 273 err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb); 274 if (err < 0) { 275 spin_unlock_irqrestore(&whc->lock, flags); 276 return err; 277 } 278 279 qset = get_qset(whc, urb, GFP_ATOMIC); 280 if (qset == NULL) 281 err = -ENOMEM; 282 else 283 err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); 284 if (!err) { 285 if (!qset->in_sw_list && !qset->remove) 286 asl_qset_insert_begin(whc, qset); 287 } else 288 usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb); 289 290 spin_unlock_irqrestore(&whc->lock, flags); 291 292 if (!err) 293 queue_work(whc->workqueue, &whc->async_work); 294 295 return err; 296 } 297 298 /** 299 * asl_urb_dequeue - remove an URB (qset) from the async list. 300 * @whc: the WHCI host controller 301 * @urb: the URB to dequeue 302 * @status: the current status of the URB 303 * 304 * URBs that do yet have qTDs can simply be removed from the software 305 * queue, otherwise the qset must be removed from the ASL so the qTDs 306 * can be removed. 307 */ 308 int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status) 309 { 310 struct whc_urb *wurb = urb->hcpriv; 311 struct whc_qset *qset = wurb->qset; 312 struct whc_std *std, *t; 313 bool has_qtd = false; 314 int ret; 315 unsigned long flags; 316 317 spin_lock_irqsave(&whc->lock, flags); 318 319 ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status); 320 if (ret < 0) 321 goto out; 322 323 list_for_each_entry_safe(std, t, &qset->stds, list_node) { 324 if (std->urb == urb) { 325 if (std->qtd) 326 has_qtd = true; 327 qset_free_std(whc, std); 328 } else 329 std->qtd = NULL; /* so this std is re-added when the qset is */ 330 } 331 332 if (has_qtd) { 333 asl_qset_remove(whc, qset); 334 wurb->status = status; 335 wurb->is_async = true; 336 queue_work(whc->workqueue, &wurb->dequeue_work); 337 } else 338 qset_remove_urb(whc, qset, urb, status); 339 out: 340 spin_unlock_irqrestore(&whc->lock, flags); 341 342 return ret; 343 } 344 345 /** 346 * asl_qset_delete - delete a qset from the ASL 347 */ 348 void asl_qset_delete(struct whc *whc, struct whc_qset *qset) 349 { 350 qset->remove = 1; 351 queue_work(whc->workqueue, &whc->async_work); 352 qset_delete(whc, qset); 353 } 354 355 /** 356 * asl_init - initialize the asynchronous schedule list 357 * 358 * A dummy qset with no qTDs is added to the ASL to simplify removing 359 * qsets (no need to stop the ASL when the last qset is removed). 
360 */ 361 int asl_init(struct whc *whc) 362 { 363 struct whc_qset *qset; 364 365 qset = qset_alloc(whc, GFP_KERNEL); 366 if (qset == NULL) 367 return -ENOMEM; 368 369 asl_qset_insert_begin(whc, qset); 370 asl_qset_insert(whc, qset); 371 372 return 0; 373 } 374 375 /** 376 * asl_clean_up - free ASL resources 377 * 378 * The ASL is stopped and empty except for the dummy qset. 379 */ 380 void asl_clean_up(struct whc *whc) 381 { 382 struct whc_qset *qset; 383 384 if (!list_empty(&whc->async_list)) { 385 qset = list_first_entry(&whc->async_list, struct whc_qset, list_node); 386 list_del(&qset->list_node); 387 qset_free(whc, qset); 388 } 389 }
1 2 /* 3 * Wireless Host Controller (WHC) driver. 4 * 5 * Copyright (C) 2007 Cambridge Silicon Radio Ltd. 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License version 9 * 2 as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 #include <linux/kernel.h> 20 #include <linux/init.h> 21 #include <linux/module.h> 22 #include <linux/uwb/umc.h> 23 24 #include "../../wusbcore/wusbhc.h" 25 26 #include "whcd.h" 27 28 /* 29 * One time initialization. 30 * 31 * Nothing to do here. 32 */ 33 static int whc_reset(struct usb_hcd *usb_hcd) 34 { 35 return 0; 36 } 37 38 /* 39 * Start the wireless host controller. 40 * 41 * Start device notification. 42 * 43 * Put hc into run state, set DNTS parameters. 44 */ 45 static int whc_start(struct usb_hcd *usb_hcd) 46 { 47 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); 48 struct whc *whc = wusbhc_to_whc(wusbhc); 49 u8 bcid; 50 int ret; 51 52 mutex_lock(&wusbhc->mutex); 53 54 le_writel(WUSBINTR_GEN_CMD_DONE 55 | WUSBINTR_HOST_ERR 56 | WUSBINTR_ASYNC_SCHED_SYNCED 57 | WUSBINTR_DNTS_INT 58 | WUSBINTR_ERR_INT 59 | WUSBINTR_INT, 60 whc->base + WUSBINTR); 61 62 /* set cluster ID */ 63 bcid = wusb_cluster_id_get(); 64 ret = whc_set_cluster_id(whc, bcid); 65 if (ret < 0) 66 goto out; 67 wusbhc->cluster_id = bcid; 68 69 /* start HC */ 70 whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN); 71 72 usb_hcd->uses_new_polling = 1; 73 set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags); 74 usb_hcd->state = HC_STATE_RUNNING; 75 76 out: 77 mutex_unlock(&wusbhc->mutex); 78 return ret; 79 } 80 81 82 /* 83 * Stop the wireless host controller. 84 * 85 * Stop device notification. 86 * 87 * Wait for pending transfer to stop? Put hc into stop state? 88 */ 89 static void whc_stop(struct usb_hcd *usb_hcd) 90 { 91 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); 92 struct whc *whc = wusbhc_to_whc(wusbhc); 93 94 mutex_lock(&wusbhc->mutex); 95 96 /* stop HC */ 97 le_writel(0, whc->base + WUSBINTR); 98 whc_write_wusbcmd(whc, WUSBCMD_RUN, 0); 99 whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, 100 WUSBSTS_HCHALTED, WUSBSTS_HCHALTED, 101 100, "HC to halt"); 102 103 wusb_cluster_id_put(wusbhc->cluster_id); 104 105 mutex_unlock(&wusbhc->mutex); 106 } 107 108 static int whc_get_frame_number(struct usb_hcd *usb_hcd) 109 { 110 /* Frame numbers are not applicable to WUSB. */ 111 return -ENOSYS; 112 } 113 114 115 /* 116 * Queue an URB to the ASL or PZL 117 */ 118 static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, 119 gfp_t mem_flags) 120 { 121 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); 122 struct whc *whc = wusbhc_to_whc(wusbhc); 123 int ret; 124 125 switch (usb_pipetype(urb->pipe)) { 126 case PIPE_INTERRUPT: 127 ret = pzl_urb_enqueue(whc, urb, mem_flags); 128 break; 129 case PIPE_ISOCHRONOUS: 130 dev_err(&whc->umc->dev, "isochronous transfers unsupported\n"); 131 ret = -ENOTSUPP; 132 break; 133 case PIPE_CONTROL: 134 case PIPE_BULK: 135 default: 136 ret = asl_urb_enqueue(whc, urb, mem_flags); 137 break; 138 } 139 140 return ret; 141 } 142 143 /* 144 * Remove a queued URB from the ASL or PZL. 
145 */ 146 static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status) 147 { 148 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); 149 struct whc *whc = wusbhc_to_whc(wusbhc); 150 int ret; 151 152 switch (usb_pipetype(urb->pipe)) { 153 case PIPE_INTERRUPT: 154 ret = pzl_urb_dequeue(whc, urb, status); 155 break; 156 case PIPE_ISOCHRONOUS: 157 ret = -ENOTSUPP; 158 break; 159 case PIPE_CONTROL: 160 case PIPE_BULK: 161 default: 162 ret = asl_urb_dequeue(whc, urb, status); 163 break; 164 } 165 166 return ret; 167 } 168 169 /* 170 * Wait for all URBs to the endpoint to be completed, then delete the 171 * qset. 172 */ 173 static void whc_endpoint_disable(struct usb_hcd *usb_hcd, 174 struct usb_host_endpoint *ep) 175 { 176 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); 177 struct whc *whc = wusbhc_to_whc(wusbhc); 178 struct whc_qset *qset; 179 180 qset = ep->hcpriv; 181 if (qset) { 182 ep->hcpriv = NULL; 183 if (usb_endpoint_xfer_bulk(&ep->desc) 184 || usb_endpoint_xfer_control(&ep->desc)) 185 asl_qset_delete(whc, qset); 186 else 187 pzl_qset_delete(whc, qset); 188 } 189 } 190 191 static void whc_endpoint_reset(struct usb_hcd *usb_hcd, 192 struct usb_host_endpoint *ep) 193 { 194 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); 195 struct whc *whc = wusbhc_to_whc(wusbhc); 196 struct whc_qset *qset; 197 unsigned long flags; 198 199 spin_lock_irqsave(&whc->lock, flags); 200 201 qset = ep->hcpriv; 202 if (qset) { 203 qset->remove = 1; 204 qset->reset = 1; 205 206 if (usb_endpoint_xfer_bulk(&ep->desc) 207 || usb_endpoint_xfer_control(&ep->desc)) 208 queue_work(whc->workqueue, &whc->async_work); 209 else 210 queue_work(whc->workqueue, &whc->periodic_work); 211 } 212 213 spin_unlock_irqrestore(&whc->lock, flags); 214 } 215 216 217 static struct hc_driver whc_hc_driver = { 218 .description = "whci-hcd", 219 .product_desc = "Wireless host controller", 220 .hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd), 221 .irq = whc_int_handler, 222 .flags = HCD_USB2, 223 224 .reset = whc_reset, 225 .start = whc_start, 226 .stop = whc_stop, 227 .get_frame_number = whc_get_frame_number, 228 .urb_enqueue = whc_urb_enqueue, 229 .urb_dequeue = whc_urb_dequeue, 230 .endpoint_disable = whc_endpoint_disable, 231 .endpoint_reset = whc_endpoint_reset, 232 233 .hub_status_data = wusbhc_rh_status_data, 234 .hub_control = wusbhc_rh_control, 235 .start_port_reset = wusbhc_rh_start_port_reset, 236 }; 237 238 static int whc_probe(struct umc_dev *umc) 239 { 240 int ret; 241 struct usb_hcd *usb_hcd; 242 struct wusbhc *wusbhc; 243 struct whc *whc; 244 struct device *dev = &umc->dev; 245 246 usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci"); 247 if (usb_hcd == NULL) { 248 dev_err(dev, "unable to create hcd\n"); 249 return -ENOMEM; 250 } 251 252 usb_hcd->wireless = 1; 253 usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */ 254 255 wusbhc = usb_hcd_to_wusbhc(usb_hcd); 256 whc = wusbhc_to_whc(wusbhc); 257 whc->umc = umc; 258 259 ret = whc_init(whc); 260 if (ret) 261 goto error; 262 263 wusbhc->dev = dev; 264 wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent); 265 if (!wusbhc->uwb_rc) { 266 ret = -ENODEV; 267 dev_err(dev, "cannot get radio controller\n"); 268 goto error; 269 } 270 271 if (whc->n_devices > USB_MAXCHILDREN) { 272 dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n", 273 whc->n_devices); 274 wusbhc->ports_max = USB_MAXCHILDREN; 275 } else 276 wusbhc->ports_max = whc->n_devices; 277 wusbhc->mmcies_max = whc->n_mmc_ies; 278 wusbhc->start = 
whc_wusbhc_start; 279 wusbhc->stop = whc_wusbhc_stop; 280 wusbhc->mmcie_add = whc_mmcie_add; 281 wusbhc->mmcie_rm = whc_mmcie_rm; 282 wusbhc->dev_info_set = whc_dev_info_set; 283 wusbhc->bwa_set = whc_bwa_set; 284 wusbhc->set_num_dnts = whc_set_num_dnts; 285 wusbhc->set_ptk = whc_set_ptk; 286 wusbhc->set_gtk = whc_set_gtk; 287 288 ret = wusbhc_create(wusbhc); 289 if (ret) 290 goto error_wusbhc_create; 291 292 ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED); 293 if (ret) { 294 dev_err(dev, "cannot add HCD: %d\n", ret); 295 goto error_usb_add_hcd; 296 } 297 device_wakeup_enable(usb_hcd->self.controller); 298 299 ret = wusbhc_b_create(wusbhc); 300 if (ret) { 301 dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret); 302 goto error_wusbhc_b_create; 303 } 304 305 whc_dbg_init(whc); 306 307 return 0; 308 309 error_wusbhc_b_create: 310 usb_remove_hcd(usb_hcd); 311 error_usb_add_hcd: 312 wusbhc_destroy(wusbhc); 313 error_wusbhc_create: 314 uwb_rc_put(wusbhc->uwb_rc); 315 error: 316 whc_clean_up(whc); 317 if (usb_hcd) 318 usb_put_hcd(usb_hcd); 319 return ret; 320 } 321 322 323 static void whc_remove(struct umc_dev *umc) 324 { 325 struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev); 326 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); 327 struct whc *whc = wusbhc_to_whc(wusbhc); 328 329 if (usb_hcd) { 330 whc_dbg_clean_up(whc); 331 wusbhc_b_destroy(wusbhc); 332 usb_remove_hcd(usb_hcd); 333 wusbhc_destroy(wusbhc); 334 uwb_rc_put(wusbhc->uwb_rc); 335 whc_clean_up(whc); 336 usb_put_hcd(usb_hcd); 337 } 338 } 339 340 static struct umc_driver whci_hc_driver = { 341 .name = "whci-hcd", 342 .cap_id = UMC_CAP_ID_WHCI_WUSB_HC, 343 .probe = whc_probe, 344 .remove = whc_remove, 345 }; 346 347 static int __init whci_hc_driver_init(void) 348 { 349 return umc_driver_register(&whci_hc_driver); 350 } 351 module_init(whci_hc_driver_init); 352 353 static void __exit whci_hc_driver_exit(void) 354 { 355 umc_driver_unregister(&whci_hc_driver); 356 } 357 module_exit(whci_hc_driver_exit); 358 359 /* PCI device ID's that we handle (so it gets loaded) */ 360 static struct pci_device_id __used whci_hcd_id_table[] = { 361 { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, 362 { /* empty last entry */ } 363 }; 364 MODULE_DEVICE_TABLE(pci, whci_hcd_id_table); 365 366 MODULE_DESCRIPTION("WHCI Wireless USB host controller driver"); 367 MODULE_AUTHOR("Cambridge Silicon Radio Ltd."); 368 MODULE_LICENSE("GPL"); 369 370 371 372 373 374 /* LDV_COMMENT_BEGIN_MAIN */ 375 #ifdef LDV_MAIN2_sequence_infinite_withcheck_stateful 376 377 /*###########################################################################*/ 378 379 /*############## Driver Environment Generator 0.2 output ####################*/ 380 381 /*###########################################################################*/ 382 383 384 385 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */ 386 void ldv_check_final_state(void); 387 388 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */ 389 void ldv_check_return_value(int res); 390 391 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */ 392 void ldv_check_return_value_probe(int res); 393 394 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. 
*/ 395 void ldv_initialize(void); 396 397 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 398 void ldv_handler_precall(void); 399 400 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */ 401 int nondet_int(void); 402 403 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */ 404 int LDV_IN_INTERRUPT; 405 406 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */ 407 void ldv_main2_sequence_infinite_withcheck_stateful(void) { 408 409 410 411 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 412 /*============================= VARIABLE DECLARATION PART =============================*/ 413 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/ 414 /* content: static int whc_reset(struct usb_hcd *usb_hcd)*/ 415 /* LDV_COMMENT_END_PREP */ 416 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "whc_reset" */ 417 struct usb_hcd * var_group1; 418 /* content: static int whc_start(struct usb_hcd *usb_hcd)*/ 419 /* LDV_COMMENT_END_PREP */ 420 /* content: static void whc_stop(struct usb_hcd *usb_hcd)*/ 421 /* LDV_COMMENT_END_PREP */ 422 /* content: static int whc_get_frame_number(struct usb_hcd *usb_hcd)*/ 423 /* LDV_COMMENT_END_PREP */ 424 /* content: static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, gfp_t mem_flags)*/ 425 /* LDV_COMMENT_END_PREP */ 426 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "whc_urb_enqueue" */ 427 struct urb * var_group2; 428 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "whc_urb_enqueue" */ 429 gfp_t var_whc_urb_enqueue_4_p2; 430 /* content: static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)*/ 431 /* LDV_COMMENT_END_PREP */ 432 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "whc_urb_dequeue" */ 433 int var_whc_urb_dequeue_5_p2; 434 /* content: static void whc_endpoint_disable(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep)*/ 435 /* LDV_COMMENT_END_PREP */ 436 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "whc_endpoint_disable" */ 437 struct usb_host_endpoint * var_group3; 438 /* content: static void whc_endpoint_reset(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep)*/ 439 /* LDV_COMMENT_END_PREP */ 440 441 /** STRUCT: struct type: umc_driver, struct name: whci_hc_driver **/ 442 /* content: static int whc_probe(struct umc_dev *umc)*/ 443 /* LDV_COMMENT_END_PREP */ 444 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "whc_probe" */ 445 struct umc_dev * var_group4; 446 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "whc_probe" */ 447 static int res_whc_probe_8; 448 /* content: static void whc_remove(struct umc_dev *umc)*/ 449 /* LDV_COMMENT_END_PREP */ 450 451 452 453 454 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */ 455 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */ 456 /*============================= VARIABLE INITIALIZING PART =============================*/ 457 LDV_IN_INTERRUPT=1; 458 459 460 461 462 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */ 463 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */ 464 /*============================= FUNCTION CALL SECTION =============================*/ 465 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. 
*/ 466 ldv_initialize(); 467 468 /** INIT: init_type: ST_MODULE_INIT **/ 469 /* content: static int __init whci_hc_driver_init(void)*/ 470 /* LDV_COMMENT_END_PREP */ 471 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". */ 472 ldv_handler_precall(); 473 if(whci_hc_driver_init()) 474 goto ldv_final; 475 476 477 int ldv_s_whci_hc_driver_umc_driver = 0; 478 479 480 while( nondet_int() 481 || !(ldv_s_whci_hc_driver_umc_driver == 0) 482 ) { 483 484 switch(nondet_int()) { 485 486 case 0: { 487 488 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/ 489 490 491 /* content: static int whc_reset(struct usb_hcd *usb_hcd)*/ 492 /* LDV_COMMENT_END_PREP */ 493 /* LDV_COMMENT_FUNCTION_CALL Function from field "reset" from driver structure with callbacks "whc_hc_driver" */ 494 ldv_handler_precall(); 495 whc_reset( var_group1); 496 497 498 499 500 } 501 502 break; 503 case 1: { 504 505 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/ 506 507 508 /* content: static int whc_start(struct usb_hcd *usb_hcd)*/ 509 /* LDV_COMMENT_END_PREP */ 510 /* LDV_COMMENT_FUNCTION_CALL Function from field "start" from driver structure with callbacks "whc_hc_driver" */ 511 ldv_handler_precall(); 512 whc_start( var_group1); 513 514 515 516 517 } 518 519 break; 520 case 2: { 521 522 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/ 523 524 525 /* content: static void whc_stop(struct usb_hcd *usb_hcd)*/ 526 /* LDV_COMMENT_END_PREP */ 527 /* LDV_COMMENT_FUNCTION_CALL Function from field "stop" from driver structure with callbacks "whc_hc_driver" */ 528 ldv_handler_precall(); 529 whc_stop( var_group1); 530 531 532 533 534 } 535 536 break; 537 case 3: { 538 539 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/ 540 541 542 /* content: static int whc_get_frame_number(struct usb_hcd *usb_hcd)*/ 543 /* LDV_COMMENT_END_PREP */ 544 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_frame_number" from driver structure with callbacks "whc_hc_driver" */ 545 ldv_handler_precall(); 546 whc_get_frame_number( var_group1); 547 548 549 550 551 } 552 553 break; 554 case 4: { 555 556 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/ 557 558 559 /* content: static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, gfp_t mem_flags)*/ 560 /* LDV_COMMENT_END_PREP */ 561 /* LDV_COMMENT_FUNCTION_CALL Function from field "urb_enqueue" from driver structure with callbacks "whc_hc_driver" */ 562 ldv_handler_precall(); 563 whc_urb_enqueue( var_group1, var_group2, var_whc_urb_enqueue_4_p2); 564 565 566 567 568 } 569 570 break; 571 case 5: { 572 573 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/ 574 575 576 /* content: static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)*/ 577 /* LDV_COMMENT_END_PREP */ 578 /* LDV_COMMENT_FUNCTION_CALL Function from field "urb_dequeue" from driver structure with callbacks "whc_hc_driver" */ 579 ldv_handler_precall(); 580 whc_urb_dequeue( var_group1, var_group2, var_whc_urb_dequeue_5_p2); 581 582 583 584 585 } 586 587 break; 588 case 6: { 589 590 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/ 591 592 593 /* content: static void whc_endpoint_disable(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep)*/ 594 /* LDV_COMMENT_END_PREP */ 595 /* LDV_COMMENT_FUNCTION_CALL Function from field "endpoint_disable" from driver structure with callbacks "whc_hc_driver" */ 596 
ldv_handler_precall(); 597 whc_endpoint_disable( var_group1, var_group3); 598 599 600 601 602 } 603 604 break; 605 case 7: { 606 607 /** STRUCT: struct type: hc_driver, struct name: whc_hc_driver **/ 608 609 610 /* content: static void whc_endpoint_reset(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep)*/ 611 /* LDV_COMMENT_END_PREP */ 612 /* LDV_COMMENT_FUNCTION_CALL Function from field "endpoint_reset" from driver structure with callbacks "whc_hc_driver" */ 613 ldv_handler_precall(); 614 whc_endpoint_reset( var_group1, var_group3); 615 616 617 618 619 } 620 621 break; 622 case 8: { 623 624 /** STRUCT: struct type: umc_driver, struct name: whci_hc_driver **/ 625 if(ldv_s_whci_hc_driver_umc_driver==0) { 626 627 /* content: static int whc_probe(struct umc_dev *umc)*/ 628 /* LDV_COMMENT_END_PREP */ 629 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "whci_hc_driver". Standart function test for correct return result. */ 630 res_whc_probe_8 = whc_probe( var_group4); 631 ldv_check_return_value(res_whc_probe_8); 632 ldv_check_return_value_probe(res_whc_probe_8); 633 if(res_whc_probe_8) 634 goto ldv_module_exit; 635 ldv_s_whci_hc_driver_umc_driver++; 636 637 } 638 639 } 640 641 break; 642 case 9: { 643 644 /** STRUCT: struct type: umc_driver, struct name: whci_hc_driver **/ 645 if(ldv_s_whci_hc_driver_umc_driver==1) { 646 647 /* content: static void whc_remove(struct umc_dev *umc)*/ 648 /* LDV_COMMENT_END_PREP */ 649 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "whci_hc_driver" */ 650 ldv_handler_precall(); 651 whc_remove( var_group4); 652 ldv_s_whci_hc_driver_umc_driver=0; 653 654 } 655 656 } 657 658 break; 659 default: break; 660 661 } 662 663 } 664 665 ldv_module_exit: 666 667 /** INIT: init_type: ST_MODULE_EXIT **/ 668 /* content: static void __exit whci_hc_driver_exit(void)*/ 669 /* LDV_COMMENT_END_PREP */ 670 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be uploaded from kernel. This function declared as "MODULE_EXIT(function name)". */ 671 ldv_handler_precall(); 672 whci_hc_driver_exit(); 673 674 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */ 675 ldv_final: ldv_check_final_state(); 676 677 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */ 678 return; 679 680 } 681 #endif 682 683 /* LDV_COMMENT_END_MAIN */
1 /* 2 * Wireless Host Controller (WHC) qset management. 3 * 4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd. 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License version 8 * 2 as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 */ 18 #include <linux/kernel.h> 19 #include <linux/dma-mapping.h> 20 #include <linux/slab.h> 21 #include <linux/uwb/umc.h> 22 #include <linux/usb.h> 23 24 #include "../../wusbcore/wusbhc.h" 25 26 #include "whcd.h" 27 28 struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags) 29 { 30 struct whc_qset *qset; 31 dma_addr_t dma; 32 33 qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma); 34 if (qset == NULL) 35 return NULL; 36 memset(qset, 0, sizeof(struct whc_qset)); 37 38 qset->qset_dma = dma; 39 qset->whc = whc; 40 41 INIT_LIST_HEAD(&qset->list_node); 42 INIT_LIST_HEAD(&qset->stds); 43 44 return qset; 45 } 46 47 /** 48 * qset_fill_qh - fill the static endpoint state in a qset's QHead 49 * @qset: the qset whose QH needs initializing with static endpoint 50 * state 51 * @urb: an urb for a transfer to this endpoint 52 */ 53 static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb) 54 { 55 struct usb_device *usb_dev = urb->dev; 56 struct wusb_dev *wusb_dev = usb_dev->wusb_dev; 57 struct usb_wireless_ep_comp_descriptor *epcd; 58 bool is_out; 59 uint8_t phy_rate; 60 61 is_out = usb_pipeout(urb->pipe); 62 63 qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize); 64 65 epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra; 66 if (epcd) { 67 qset->max_seq = epcd->bMaxSequence; 68 qset->max_burst = epcd->bMaxBurst; 69 } else { 70 qset->max_seq = 2; 71 qset->max_burst = 1; 72 } 73 74 /* 75 * Initial PHY rate is 53.3 Mbit/s for control endpoints or 76 * the maximum supported by the device for other endpoints 77 * (unless limited by the user). 78 */ 79 if (usb_pipecontrol(urb->pipe)) 80 phy_rate = UWB_PHY_RATE_53; 81 else { 82 uint16_t phy_rates; 83 84 phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates); 85 phy_rate = fls(phy_rates) - 1; 86 if (phy_rate > whc->wusbhc.phy_rate) 87 phy_rate = whc->wusbhc.phy_rate; 88 } 89 90 qset->qh.info1 = cpu_to_le32( 91 QH_INFO1_EP(usb_pipeendpoint(urb->pipe)) 92 | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN) 93 | usb_pipe_to_qh_type(urb->pipe) 94 | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum)) 95 | QH_INFO1_MAX_PKT_LEN(qset->max_packet) 96 ); 97 qset->qh.info2 = cpu_to_le32( 98 QH_INFO2_BURST(qset->max_burst) 99 | QH_INFO2_DBP(0) 100 | QH_INFO2_MAX_COUNT(3) 101 | QH_INFO2_MAX_RETRY(3) 102 | QH_INFO2_MAX_SEQ(qset->max_seq - 1) 103 ); 104 /* FIXME: where can we obtain these Tx parameters from? Why 105 * doesn't the chip know what Tx power to use? It knows the Rx 106 * strength and can presumably guess the Tx power required 107 * from that? 
*/ 108 qset->qh.info3 = cpu_to_le32( 109 QH_INFO3_TX_RATE(phy_rate) 110 | QH_INFO3_TX_PWR(0) /* 0 == max power */ 111 ); 112 113 qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1); 114 } 115 116 /** 117 * qset_clear - clear fields in a qset so it may be reinserted into a 118 * schedule. 119 * 120 * The sequence number and current window are not cleared (see 121 * qset_reset()). 122 */ 123 void qset_clear(struct whc *whc, struct whc_qset *qset) 124 { 125 qset->td_start = qset->td_end = qset->ntds = 0; 126 127 qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T); 128 qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK; 129 qset->qh.err_count = 0; 130 qset->qh.scratch[0] = 0; 131 qset->qh.scratch[1] = 0; 132 qset->qh.scratch[2] = 0; 133 134 memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay)); 135 136 init_completion(&qset->remove_complete); 137 } 138 139 /** 140 * qset_reset - reset endpoint state in a qset. 141 * 142 * Clears the sequence number and current window. This qset must not 143 * be in the ASL or PZL. 144 */ 145 void qset_reset(struct whc *whc, struct whc_qset *qset) 146 { 147 qset->reset = 0; 148 149 qset->qh.status &= ~QH_STATUS_SEQ_MASK; 150 qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1); 151 } 152 153 /** 154 * get_qset - get the qset for an async endpoint 155 * 156 * A new qset is created if one does not already exist. 157 */ 158 struct whc_qset *get_qset(struct whc *whc, struct urb *urb, 159 gfp_t mem_flags) 160 { 161 struct whc_qset *qset; 162 163 qset = urb->ep->hcpriv; 164 if (qset == NULL) { 165 qset = qset_alloc(whc, mem_flags); 166 if (qset == NULL) 167 return NULL; 168 169 qset->ep = urb->ep; 170 urb->ep->hcpriv = qset; 171 qset_fill_qh(whc, qset, urb); 172 } 173 return qset; 174 } 175 176 void qset_remove_complete(struct whc *whc, struct whc_qset *qset) 177 { 178 qset->remove = 0; 179 list_del_init(&qset->list_node); 180 complete(&qset->remove_complete); 181 } 182 183 /** 184 * qset_add_qtds - add qTDs for an URB to a qset 185 * 186 * Returns true if the list (ASL/PZL) must be updated because (for a 187 * WHCI 0.95 controller) an activated qTD was pointed to be iCur. 188 */ 189 enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset) 190 { 191 struct whc_std *std; 192 enum whc_update update = 0; 193 194 list_for_each_entry(std, &qset->stds, list_node) { 195 struct whc_qtd *qtd; 196 uint32_t status; 197 198 if (qset->ntds >= WHCI_QSET_TD_MAX 199 || (qset->pause_after_urb && std->urb != qset->pause_after_urb)) 200 break; 201 202 if (std->qtd) 203 continue; /* already has a qTD */ 204 205 qtd = std->qtd = &qset->qtd[qset->td_end]; 206 207 /* Fill in setup bytes for control transfers. */ 208 if (usb_pipecontrol(std->urb->pipe)) 209 memcpy(qtd->setup, std->urb->setup_packet, 8); 210 211 status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len); 212 213 if (whc_std_last(std) && usb_pipeout(std->urb->pipe)) 214 status |= QTD_STS_LAST_PKT; 215 216 /* 217 * For an IN transfer the iAlt field should be set so 218 * the h/w will automatically advance to the next 219 * transfer. However, if there are 8 or more TDs 220 * remaining in this transfer then iAlt cannot be set 221 * as it could point to somewhere in this transfer. 
1 2 #include <linux/types.h> 3 #include <linux/dma-direction.h> 4 5 extern dma_addr_t ldv_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir); 6 extern dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir); 7 extern dma_addr_t ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); 8 extern int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); 9 #line 1 "/home/druidos/temp/331_1a/work/current--X--drivers--X--defaultlinux-3.14.1.tar.xz--X--331_1a--X--cpachecker/linux-3.14.1.tar.xz/csd_deg_dscv/10341/dscv_tempdir/dscv/ri/331_1a/drivers/usb/host/whci/qset.c" 10 /* 11 * Wireless Host Controller (WHC) qset management. 12 * 13 * Copyright (C) 2007 Cambridge Silicon Radio Ltd. 14 * 15 * This program is free software; you can redistribute it and/or 16 * modify it under the terms of the GNU General Public License version 17 * 2 as published by the Free Software Foundation. 18 * 19 * This program is distributed in the hope that it will be useful, 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * GNU General Public License for more details. 23 * 24 * You should have received a copy of the GNU General Public License 25 * along with this program. If not, see <http://www.gnu.org/licenses/>. 26 */ 27 #include <linux/kernel.h> 28 #include <linux/dma-mapping.h> 29 #include <linux/slab.h> 30 #include <linux/uwb/umc.h> 31 #include <linux/usb.h> 32 33 #include "../../wusbcore/wusbhc.h" 34 35 #include "whcd.h" 36 37 struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags) 38 { 39 struct whc_qset *qset; 40 dma_addr_t dma; 41 42 qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma); 43 if (qset == NULL) 44 return NULL; 45 memset(qset, 0, sizeof(struct whc_qset)); 46 47 qset->qset_dma = dma; 48 qset->whc = whc; 49 50 INIT_LIST_HEAD(&qset->list_node); 51 INIT_LIST_HEAD(&qset->stds); 52 53 return qset; 54 } 55 56 /** 57 * qset_fill_qh - fill the static endpoint state in a qset's QHead 58 * @qset: the qset whose QH needs initializing with static endpoint 59 * state 60 * @urb: an urb for a transfer to this endpoint 61 */ 62 static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb) 63 { 64 struct usb_device *usb_dev = urb->dev; 65 struct wusb_dev *wusb_dev = usb_dev->wusb_dev; 66 struct usb_wireless_ep_comp_descriptor *epcd; 67 bool is_out; 68 uint8_t phy_rate; 69 70 is_out = usb_pipeout(urb->pipe); 71 72 qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize); 73 74 epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra; 75 if (epcd) { 76 qset->max_seq = epcd->bMaxSequence; 77 qset->max_burst = epcd->bMaxBurst; 78 } else { 79 qset->max_seq = 2; 80 qset->max_burst = 1; 81 } 82 83 /* 84 * Initial PHY rate is 53.3 Mbit/s for control endpoints or 85 * the maximum supported by the device for other endpoints 86 * (unless limited by the user). 87 */ 88 if (usb_pipecontrol(urb->pipe)) 89 phy_rate = UWB_PHY_RATE_53; 90 else { 91 uint16_t phy_rates; 92 93 phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates); 94 phy_rate = fls(phy_rates) - 1; 95 if (phy_rate > whc->wusbhc.phy_rate) 96 phy_rate = whc->wusbhc.phy_rate; 97 } 98 99 qset->qh.info1 = cpu_to_le32( 100 QH_INFO1_EP(usb_pipeendpoint(urb->pipe)) 101 | (is_out ? 
QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN) 102 | usb_pipe_to_qh_type(urb->pipe) 103 | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum)) 104 | QH_INFO1_MAX_PKT_LEN(qset->max_packet) 105 ); 106 qset->qh.info2 = cpu_to_le32( 107 QH_INFO2_BURST(qset->max_burst) 108 | QH_INFO2_DBP(0) 109 | QH_INFO2_MAX_COUNT(3) 110 | QH_INFO2_MAX_RETRY(3) 111 | QH_INFO2_MAX_SEQ(qset->max_seq - 1) 112 ); 113 /* FIXME: where can we obtain these Tx parameters from? Why 114 * doesn't the chip know what Tx power to use? It knows the Rx 115 * strength and can presumably guess the Tx power required 116 * from that? */ 117 qset->qh.info3 = cpu_to_le32( 118 QH_INFO3_TX_RATE(phy_rate) 119 | QH_INFO3_TX_PWR(0) /* 0 == max power */ 120 ); 121 122 qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1); 123 } 124 125 /** 126 * qset_clear - clear fields in a qset so it may be reinserted into a 127 * schedule. 128 * 129 * The sequence number and current window are not cleared (see 130 * qset_reset()). 131 */ 132 void qset_clear(struct whc *whc, struct whc_qset *qset) 133 { 134 qset->td_start = qset->td_end = qset->ntds = 0; 135 136 qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T); 137 qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK; 138 qset->qh.err_count = 0; 139 qset->qh.scratch[0] = 0; 140 qset->qh.scratch[1] = 0; 141 qset->qh.scratch[2] = 0; 142 143 memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay)); 144 145 init_completion(&qset->remove_complete); 146 } 147 148 /** 149 * qset_reset - reset endpoint state in a qset. 150 * 151 * Clears the sequence number and current window. This qset must not 152 * be in the ASL or PZL. 153 */ 154 void qset_reset(struct whc *whc, struct whc_qset *qset) 155 { 156 qset->reset = 0; 157 158 qset->qh.status &= ~QH_STATUS_SEQ_MASK; 159 qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1); 160 } 161 162 /** 163 * get_qset - get the qset for an async endpoint 164 * 165 * A new qset is created if one does not already exist. 166 */ 167 struct whc_qset *get_qset(struct whc *whc, struct urb *urb, 168 gfp_t mem_flags) 169 { 170 struct whc_qset *qset; 171 172 qset = urb->ep->hcpriv; 173 if (qset == NULL) { 174 qset = qset_alloc(whc, mem_flags); 175 if (qset == NULL) 176 return NULL; 177 178 qset->ep = urb->ep; 179 urb->ep->hcpriv = qset; 180 qset_fill_qh(whc, qset, urb); 181 } 182 return qset; 183 } 184 185 void qset_remove_complete(struct whc *whc, struct whc_qset *qset) 186 { 187 qset->remove = 0; 188 list_del_init(&qset->list_node); 189 complete(&qset->remove_complete); 190 } 191 192 /** 193 * qset_add_qtds - add qTDs for an URB to a qset 194 * 195 * Returns true if the list (ASL/PZL) must be updated because (for a 196 * WHCI 0.95 controller) an activated qTD was pointed to be iCur. 197 */ 198 enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset) 199 { 200 struct whc_std *std; 201 enum whc_update update = 0; 202 203 list_for_each_entry(std, &qset->stds, list_node) { 204 struct whc_qtd *qtd; 205 uint32_t status; 206 207 if (qset->ntds >= WHCI_QSET_TD_MAX 208 || (qset->pause_after_urb && std->urb != qset->pause_after_urb)) 209 break; 210 211 if (std->qtd) 212 continue; /* already has a qTD */ 213 214 qtd = std->qtd = &qset->qtd[qset->td_end]; 215 216 /* Fill in setup bytes for control transfers. 
*/ 217 if (usb_pipecontrol(std->urb->pipe)) 218 memcpy(qtd->setup, std->urb->setup_packet, 8); 219 220 status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len); 221 222 if (whc_std_last(std) && usb_pipeout(std->urb->pipe)) 223 status |= QTD_STS_LAST_PKT; 224 225 /* 226 * For an IN transfer the iAlt field should be set so 227 * the h/w will automatically advance to the next 228 * transfer. However, if there are 8 or more TDs 229 * remaining in this transfer then iAlt cannot be set 230 * as it could point to somewhere in this transfer. 231 */ 232 if (std->ntds_remaining < WHCI_QSET_TD_MAX) { 233 int ialt; 234 ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX; 235 status |= QTD_STS_IALT(ialt); 236 } else if (usb_pipein(std->urb->pipe)) 237 qset->pause_after_urb = std->urb; 238 239 if (std->num_pointers) 240 qtd->options = cpu_to_le32(QTD_OPT_IOC); 241 else 242 qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL); 243 qtd->page_list_ptr = cpu_to_le64(std->dma_addr); 244 245 qtd->status = cpu_to_le32(status); 246 247 if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end) 248 update = WHC_UPDATE_UPDATED; 249 250 if (++qset->td_end >= WHCI_QSET_TD_MAX) 251 qset->td_end = 0; 252 qset->ntds++; 253 } 254 255 return update; 256 } 257 258 /** 259 * qset_remove_qtd - remove the first qTD from a qset. 260 * 261 * The qTD might be still active (if it's part of a IN URB that 262 * resulted in a short read) so ensure it's deactivated. 263 */ 264 static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset) 265 { 266 qset->qtd[qset->td_start].status = 0; 267 268 if (++qset->td_start >= WHCI_QSET_TD_MAX) 269 qset->td_start = 0; 270 qset->ntds--; 271 } 272 273 static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std) 274 { 275 struct scatterlist *sg; 276 void *bounce; 277 size_t remaining, offset; 278 279 bounce = std->bounce_buf; 280 remaining = std->len; 281 282 sg = std->bounce_sg; 283 offset = std->bounce_offset; 284 285 while (remaining) { 286 size_t len; 287 288 len = min(sg->length - offset, remaining); 289 memcpy(sg_virt(sg) + offset, bounce, len); 290 291 bounce += len; 292 remaining -= len; 293 294 offset += len; 295 if (offset >= sg->length) { 296 sg = sg_next(sg); 297 offset = 0; 298 } 299 } 300 301 } 302 303 /** 304 * qset_free_std - remove an sTD and free it. 305 * @whc: the WHCI host controller 306 * @std: the sTD to remove and free. 307 */ 308 void qset_free_std(struct whc *whc, struct whc_std *std) 309 { 310 list_del(&std->list_node); 311 if (std->bounce_buf) { 312 bool is_out = usb_pipeout(std->urb->pipe); 313 dma_addr_t dma_addr; 314 315 if (std->num_pointers) 316 dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr); 317 else 318 dma_addr = std->dma_addr; 319 320 dma_unmap_single(whc->wusbhc.dev, dma_addr, 321 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 322 if (!is_out) 323 qset_copy_bounce_to_sg(whc, std); 324 kfree(std->bounce_buf); 325 } 326 if (std->pl_virt) { 327 if (std->dma_addr) 328 dma_unmap_single(whc->wusbhc.dev, std->dma_addr, 329 std->num_pointers * sizeof(struct whc_page_list_entry), 330 DMA_TO_DEVICE); 331 kfree(std->pl_virt); 332 std->pl_virt = NULL; 333 } 334 kfree(std); 335 } 336 337 /** 338 * qset_remove_qtds - remove an URB's qTDs (and sTDs). 
339 */ 340 static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset, 341 struct urb *urb) 342 { 343 struct whc_std *std, *t; 344 345 list_for_each_entry_safe(std, t, &qset->stds, list_node) { 346 if (std->urb != urb) 347 break; 348 if (std->qtd != NULL) 349 qset_remove_qtd(whc, qset); 350 qset_free_std(whc, std); 351 } 352 } 353 354 /** 355 * qset_free_stds - free any remaining sTDs for an URB. 356 */ 357 static void qset_free_stds(struct whc_qset *qset, struct urb *urb) 358 { 359 struct whc_std *std, *t; 360 361 list_for_each_entry_safe(std, t, &qset->stds, list_node) { 362 if (std->urb == urb) 363 qset_free_std(qset->whc, std); 364 } 365 } 366 367 static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags) 368 { 369 dma_addr_t dma_addr = std->dma_addr; 370 dma_addr_t sp, ep; 371 size_t pl_len; 372 int p; 373 374 /* Short buffers don't need a page list. */ 375 if (std->len <= WHCI_PAGE_SIZE) { 376 std->num_pointers = 0; 377 return 0; 378 } 379 380 sp = dma_addr & ~(WHCI_PAGE_SIZE-1); 381 ep = dma_addr + std->len; 382 std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); 383 384 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); 385 std->pl_virt = kmalloc(pl_len, mem_flags); 386 if (std->pl_virt == NULL) 387 return -ENOMEM; 388 std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE); 389 390 for (p = 0; p < std->num_pointers; p++) { 391 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); 392 dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1); 393 } 394 395 return 0; 396 } 397 398 /** 399 * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system. 400 */ 401 static void urb_dequeue_work(struct work_struct *work) 402 { 403 struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work); 404 struct whc_qset *qset = wurb->qset; 405 struct whc *whc = qset->whc; 406 unsigned long flags; 407 408 if (wurb->is_async == true) 409 asl_update(whc, WUSBCMD_ASYNC_UPDATED 410 | WUSBCMD_ASYNC_SYNCED_DB 411 | WUSBCMD_ASYNC_QSET_RM); 412 else 413 pzl_update(whc, WUSBCMD_PERIODIC_UPDATED 414 | WUSBCMD_PERIODIC_SYNCED_DB 415 | WUSBCMD_PERIODIC_QSET_RM); 416 417 spin_lock_irqsave(&whc->lock, flags); 418 qset_remove_urb(whc, qset, wurb->urb, wurb->status); 419 spin_unlock_irqrestore(&whc->lock, flags); 420 } 421 422 static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset, 423 struct urb *urb, gfp_t mem_flags) 424 { 425 struct whc_std *std; 426 427 std = kzalloc(sizeof(struct whc_std), mem_flags); 428 if (std == NULL) 429 return NULL; 430 431 std->urb = urb; 432 std->qtd = NULL; 433 434 INIT_LIST_HEAD(&std->list_node); 435 list_add_tail(&std->list_node, &qset->stds); 436 437 return std; 438 } 439 440 static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb, 441 gfp_t mem_flags) 442 { 443 size_t remaining; 444 struct scatterlist *sg; 445 int i; 446 int ntds = 0; 447 struct whc_std *std = NULL; 448 struct whc_page_list_entry *new_pl_virt; 449 dma_addr_t prev_end = 0; 450 size_t pl_len; 451 int p = 0; 452 453 remaining = urb->transfer_buffer_length; 454 455 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { 456 dma_addr_t dma_addr; 457 size_t dma_remaining; 458 dma_addr_t sp, ep; 459 int num_pointers; 460 461 if (remaining == 0) { 462 break; 463 } 464 465 dma_addr = sg_dma_address(sg); 466 dma_remaining = min_t(size_t, sg_dma_len(sg), remaining); 467 468 while (dma_remaining) { 469 size_t dma_len; 470 471 /* 472 * We can use the previous std (if it 
exists) provided that: 473 * - the previous one ended on a page boundary. 474 * - the current one begins on a page boundary. 475 * - the previous one isn't full. 476 * 477 * If a new std is needed but the previous one 478 * was not a whole number of packets then this 479 * sg list cannot be mapped onto multiple 480 * qTDs. Return an error and let the caller 481 * sort it out. 482 */ 483 if (!std 484 || (prev_end & (WHCI_PAGE_SIZE-1)) 485 || (dma_addr & (WHCI_PAGE_SIZE-1)) 486 || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) { 487 if (std && std->len % qset->max_packet != 0) 488 return -EINVAL; 489 std = qset_new_std(whc, qset, urb, mem_flags); 490 if (std == NULL) { 491 return -ENOMEM; 492 } 493 ntds++; 494 p = 0; 495 } 496 497 dma_len = dma_remaining; 498 499 /* 500 * If the remainder of this element doesn't 501 * fit in a single qTD, limit the qTD to a 502 * whole number of packets. This allows the 503 * remainder to go into the next qTD. 504 */ 505 if (std->len + dma_len > QTD_MAX_XFER_SIZE) { 506 dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet) 507 * qset->max_packet - std->len; 508 } 509 510 std->len += dma_len; 511 std->ntds_remaining = -1; /* filled in later */ 512 513 sp = dma_addr & ~(WHCI_PAGE_SIZE-1); 514 ep = dma_addr + dma_len; 515 num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); 516 std->num_pointers += num_pointers; 517 518 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); 519 520 new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags); 521 if (new_pl_virt == NULL) { 522 kfree(std->pl_virt); 523 std->pl_virt = NULL; 524 return -ENOMEM; 525 } 526 std->pl_virt = new_pl_virt; 527 528 for (;p < std->num_pointers; p++) { 529 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); 530 dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1); 531 } 532 533 prev_end = dma_addr = ep; 534 dma_remaining -= dma_len; 535 remaining -= dma_len; 536 } 537 } 538 539 /* Now the number of stds is know, go back and fill in 540 std->ntds_remaining. */ 541 list_for_each_entry(std, &qset->stds, list_node) { 542 if (std->ntds_remaining == -1) { 543 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); 544 std->ntds_remaining = ntds--; 545 std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, 546 pl_len, DMA_TO_DEVICE); 547 } 548 } 549 return 0; 550 } 551 552 /** 553 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data 554 * 555 * If the URB contains an sg list whose elements cannot be directly 556 * mapped to qTDs then the data must be transferred via bounce 557 * buffers. 
558 */ 559 static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset, 560 struct urb *urb, gfp_t mem_flags) 561 { 562 bool is_out = usb_pipeout(urb->pipe); 563 size_t max_std_len; 564 size_t remaining; 565 int ntds = 0; 566 struct whc_std *std = NULL; 567 void *bounce = NULL; 568 struct scatterlist *sg; 569 int i; 570 571 /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */ 572 max_std_len = qset->max_burst * qset->max_packet; 573 574 remaining = urb->transfer_buffer_length; 575 576 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { 577 size_t len; 578 size_t sg_remaining; 579 void *orig; 580 581 if (remaining == 0) { 582 break; 583 } 584 585 sg_remaining = min_t(size_t, remaining, sg->length); 586 orig = sg_virt(sg); 587 588 while (sg_remaining) { 589 if (!std || std->len == max_std_len) { 590 std = qset_new_std(whc, qset, urb, mem_flags); 591 if (std == NULL) 592 return -ENOMEM; 593 std->bounce_buf = kmalloc(max_std_len, mem_flags); 594 if (std->bounce_buf == NULL) 595 return -ENOMEM; 596 std->bounce_sg = sg; 597 std->bounce_offset = orig - sg_virt(sg); 598 bounce = std->bounce_buf; 599 ntds++; 600 } 601 602 len = min(sg_remaining, max_std_len - std->len); 603 604 if (is_out) 605 memcpy(bounce, orig, len); 606 607 std->len += len; 608 std->ntds_remaining = -1; /* filled in later */ 609 610 bounce += len; 611 orig += len; 612 sg_remaining -= len; 613 remaining -= len; 614 } 615 } 616 617 /* 618 * For each of the new sTDs, map the bounce buffers, create 619 * page lists (if necessary), and fill in std->ntds_remaining. 620 */ 621 list_for_each_entry(std, &qset->stds, list_node) { 622 if (std->ntds_remaining != -1) 623 continue; 624 625 std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len, 626 is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 627 628 if (qset_fill_page_list(whc, std, mem_flags) < 0) 629 return -ENOMEM; 630 631 std->ntds_remaining = ntds--; 632 } 633 634 return 0; 635 } 636 637 /** 638 * qset_add_urb - add an urb to the qset's queue. 639 * 640 * The URB is chopped into sTDs, one for each qTD that will required. 641 * At least one qTD (and sTD) is required even if the transfer has no 642 * data (e.g., for some control transfers). 
643 */ 644 int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, 645 gfp_t mem_flags) 646 { 647 struct whc_urb *wurb; 648 int remaining = urb->transfer_buffer_length; 649 u64 transfer_dma = urb->transfer_dma; 650 int ntds_remaining; 651 int ret; 652 653 wurb = kzalloc(sizeof(struct whc_urb), mem_flags); 654 if (wurb == NULL) 655 goto err_no_mem; 656 urb->hcpriv = wurb; 657 wurb->qset = qset; 658 wurb->urb = urb; 659 INIT_WORK(&wurb->dequeue_work, urb_dequeue_work); 660 661 if (urb->num_sgs) { 662 ret = qset_add_urb_sg(whc, qset, urb, mem_flags); 663 if (ret == -EINVAL) { 664 qset_free_stds(qset, urb); 665 ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags); 666 } 667 if (ret < 0) 668 goto err_no_mem; 669 return 0; 670 } 671 672 ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE); 673 if (ntds_remaining == 0) 674 ntds_remaining = 1; 675 676 while (ntds_remaining) { 677 struct whc_std *std; 678 size_t std_len; 679 680 std_len = remaining; 681 if (std_len > QTD_MAX_XFER_SIZE) 682 std_len = QTD_MAX_XFER_SIZE; 683 684 std = qset_new_std(whc, qset, urb, mem_flags); 685 if (std == NULL) 686 goto err_no_mem; 687 688 std->dma_addr = transfer_dma; 689 std->len = std_len; 690 std->ntds_remaining = ntds_remaining; 691 692 if (qset_fill_page_list(whc, std, mem_flags) < 0) 693 goto err_no_mem; 694 695 ntds_remaining--; 696 remaining -= std_len; 697 transfer_dma += std_len; 698 } 699 700 return 0; 701 702 err_no_mem: 703 qset_free_stds(qset, urb); 704 return -ENOMEM; 705 } 706 707 /** 708 * qset_remove_urb - remove an URB from the urb queue. 709 * 710 * The URB is returned to the USB subsystem. 711 */ 712 void qset_remove_urb(struct whc *whc, struct whc_qset *qset, 713 struct urb *urb, int status) 714 { 715 struct wusbhc *wusbhc = &whc->wusbhc; 716 struct whc_urb *wurb = urb->hcpriv; 717 718 usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb); 719 /* Drop the lock as urb->complete() may enqueue another urb. */ 720 spin_unlock(&whc->lock); 721 wusbhc_giveback_urb(wusbhc, urb, status); 722 spin_lock(&whc->lock); 723 724 kfree(wurb); 725 } 726 727 /** 728 * get_urb_status_from_qtd - get the completed urb status from qTD status 729 * @urb: completed urb 730 * @status: qTD status 731 */ 732 static int get_urb_status_from_qtd(struct urb *urb, u32 status) 733 { 734 if (status & QTD_STS_HALTED) { 735 if (status & QTD_STS_DBE) 736 return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM; 737 else if (status & QTD_STS_BABBLE) 738 return -EOVERFLOW; 739 else if (status & QTD_STS_RCE) 740 return -ETIME; 741 return -EPIPE; 742 } 743 if (usb_pipein(urb->pipe) 744 && (urb->transfer_flags & URB_SHORT_NOT_OK) 745 && urb->actual_length < urb->transfer_buffer_length) 746 return -EREMOTEIO; 747 return 0; 748 } 749 750 /** 751 * process_inactive_qtd - process an inactive (but not halted) qTD. 752 * 753 * Update the urb with the transfer bytes from the qTD, if the urb is 754 * completely transferred or (in the case of an IN only) the LPF is 755 * set, then the transfer is complete and the urb should be returned 756 * to the system. 
757 */ 758 void process_inactive_qtd(struct whc *whc, struct whc_qset *qset, 759 struct whc_qtd *qtd) 760 { 761 struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node); 762 struct urb *urb = std->urb; 763 uint32_t status; 764 bool complete; 765 766 status = le32_to_cpu(qtd->status); 767 768 urb->actual_length += std->len - QTD_STS_TO_LEN(status); 769 770 if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT)) 771 complete = true; 772 else 773 complete = whc_std_last(std); 774 775 qset_remove_qtd(whc, qset); 776 qset_free_std(whc, std); 777 778 /* 779 * Transfers for this URB are complete? Then return it to the 780 * USB subsystem. 781 */ 782 if (complete) { 783 qset_remove_qtds(whc, qset, urb); 784 qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status)); 785 786 /* 787 * If iAlt isn't valid then the hardware didn't 788 * advance iCur. Adjust the start and end pointers to 789 * match iCur. 790 */ 791 if (!(status & QTD_STS_IALT_VALID)) 792 qset->td_start = qset->td_end 793 = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status)); 794 qset->pause_after_urb = NULL; 795 } 796 } 797 798 /** 799 * process_halted_qtd - process a qset with a halted qtd 800 * 801 * Remove all the qTDs for the failed URB and return the failed URB to 802 * the USB subsystem. Then remove all other qTDs so the qset can be 803 * removed. 804 * 805 * FIXME: this is the point where rate adaptation can be done. If a 806 * transfer failed because it exceeded the maximum number of retries 807 * then it could be reactivated with a slower rate without having to 808 * remove the qset. 809 */ 810 void process_halted_qtd(struct whc *whc, struct whc_qset *qset, 811 struct whc_qtd *qtd) 812 { 813 struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node); 814 struct urb *urb = std->urb; 815 int urb_status; 816 817 urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status)); 818 819 qset_remove_qtds(whc, qset, urb); 820 qset_remove_urb(whc, qset, urb, urb_status); 821 822 list_for_each_entry(std, &qset->stds, list_node) { 823 if (qset->ntds == 0) 824 break; 825 qset_remove_qtd(whc, qset); 826 std->qtd = NULL; 827 } 828 829 qset->remove = 1; 830 } 831 832 void qset_free(struct whc *whc, struct whc_qset *qset) 833 { 834 dma_pool_free(whc->qset_pool, qset, qset->qset_dma); 835 } 836 837 /** 838 * qset_delete - wait for a qset to be unused, then free it. 839 */ 840 void qset_delete(struct whc *whc, struct whc_qset *qset) 841 { 842 wait_for_completion(&qset->remove_complete); 843 qset_free(whc, qset); 844 } 845 846 #line 9 "/home/druidos/temp/331_1a/work/current--X--drivers--X--defaultlinux-3.14.1.tar.xz--X--331_1a--X--cpachecker/linux-3.14.1.tar.xz/csd_deg_dscv/10341/dscv_tempdir/dscv/ri/331_1a/drivers/usb/host/whci/qset.o.c.prepared"
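Throughout qset.c above, the return value of dma_map_single() is stored (into std->dma_addr in qset_fill_page_list(), qset_add_urb_sg() and qset_add_urb_sg_linearize()) but is never validated with dma_mapping_error(); this unchecked mapping is exactly the violation the trace reports. A minimal sketch of the checked pattern, illustrative only and not the upstream fix (the -EFAULT error code is an assumption):

    std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len,
                                   DMA_TO_DEVICE);
    if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
            /* The mapping failed: free the page list and report the error. */
            kfree(std->pl_virt);
            std->pl_virt = NULL;
            return -EFAULT;
    }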
1
2 #include <linux/types.h>
3 #include <linux/dma-direction.h>
4 #include <verifier/rcv.h>
5 #include <verifier/set.h>
6 #include <verifier/map.h>
7
8 Set LDV_DMA_MAP_CALLS;
9
10 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_page') maps page */
11 dma_addr_t ldv_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir) {
12 dma_addr_t nonedetermined;
13
14 nonedetermined = ldv_undef_ptr();
15
16 /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
17 ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
18
19 ldv_set_add(LDV_DMA_MAP_CALLS, nonedetermined);
20
21 return nonedetermined;
22 }
23
24 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_mapping_error') checks and unregisters a mapping */
25 int ldv_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) {
26
27 /* LDV_COMMENT_ASSERT The address being checked must come from a previous dma_mapping call */
28 ldv_assert(ldv_set_contains(LDV_DMA_MAP_CALLS, dma_addr));
29 ldv_set_remove(LDV_DMA_MAP_CALLS, dma_addr);
30
31 int nonedetermined;
32
33 nonedetermined = ldv_undef_int();
34
35 return nonedetermined;
36 }
37
38
39
40 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_single') maps pci_dma */
41 dma_addr_t ldv_dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir) {
42 dma_addr_t nonedetermined;
43
44 nonedetermined = ldv_undef_ptr();
45
46 /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
47 ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
48
49 ldv_set_add(LDV_DMA_MAP_CALLS, nonedetermined);
50
51 return nonedetermined;
52 }
53
54 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_single_attrs') maps pci_dma */
55 dma_addr_t ldv_dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) {
56 dma_addr_t nonedetermined;
57
58 nonedetermined = ldv_undef_ptr();
59
60 /* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
61 ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
62
63 ldv_set_add(LDV_DMA_MAP_CALLS, nonedetermined);
64
65 return nonedetermined;
66 }
67
68 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_initialize') Initialize the set of unchecked DMA mappings at the beginning */
69 void ldv_initialize(void) {
70 /* LDV_COMMENT_CHANGE_STATE The set of unchecked DMA mappings is empty at the beginning */
71 ldv_set_init(LDV_DMA_MAP_CALLS);
72 }
73
74 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that no DMA mapping is left unchecked at the end */
75 void ldv_check_final_state(void) {
76 /* LDV_COMMENT_ASSERT Every DMA mapping should be checked with dma_mapping_error() before module unloading */
77 ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS));
78 }
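The model above reduces the rule to set emptiness: each ldv_dma_map_*() stub asserts that LDV_DMA_MAP_CALLS is empty (i.e. the previous mapping has already been checked) before adding the new address, ldv_dma_mapping_error() removes an address from the set, and ldv_check_final_state() asserts emptiness on module unload. A sketch of the call sequences the model accepts and rejects, assuming hypothetical dev, buf and len:

    /* Accepted: the mapping is checked before anything else is mapped. */
    dma_addr_t a = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, a))
            return -ENOMEM;

    /* Rejected: mapping a second buffer while the first mapping is still
     * unchecked trips ldv_assert(ldv_set_is_empty(LDV_DMA_MAP_CALLS)). */
    dma_addr_t b = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    dma_addr_t c = dma_map_single(dev, buf, len, DMA_TO_DEVICE);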
1 #ifndef _LINUX_LIST_H 2 #define _LINUX_LIST_H 3 4 #include <linux/types.h> 5 #include <linux/stddef.h> 6 #include <linux/poison.h> 7 #include <linux/const.h> 8 9 /* 10 * Simple doubly linked list implementation. 11 * 12 * Some of the internal functions ("__xxx") are useful when 13 * manipulating whole lists rather than single entries, as 14 * sometimes we already know the next/prev entries and we can 15 * generate better code by using them directly rather than 16 * using the generic single-entry routines. 17 */ 18 19 #define LIST_HEAD_INIT(name) { &(name), &(name) } 20 21 #define LIST_HEAD(name) \ 22 struct list_head name = LIST_HEAD_INIT(name) 23 24 static inline void INIT_LIST_HEAD(struct list_head *list) 25 { 26 list->next = list; 27 list->prev = list; 28 } 29 30 /* 31 * Insert a new entry between two known consecutive entries. 32 * 33 * This is only for internal list manipulation where we know 34 * the prev/next entries already! 35 */ 36 #ifndef CONFIG_DEBUG_LIST 37 static inline void __list_add(struct list_head *new, 38 struct list_head *prev, 39 struct list_head *next) 40 { 41 next->prev = new; 42 new->next = next; 43 new->prev = prev; 44 prev->next = new; 45 } 46 #else 47 extern void __list_add(struct list_head *new, 48 struct list_head *prev, 49 struct list_head *next); 50 #endif 51 52 /** 53 * list_add - add a new entry 54 * @new: new entry to be added 55 * @head: list head to add it after 56 * 57 * Insert a new entry after the specified head. 58 * This is good for implementing stacks. 59 */ 60 static inline void list_add(struct list_head *new, struct list_head *head) 61 { 62 __list_add(new, head, head->next); 63 } 64 65 66 /** 67 * list_add_tail - add a new entry 68 * @new: new entry to be added 69 * @head: list head to add it before 70 * 71 * Insert a new entry before the specified head. 72 * This is useful for implementing queues. 73 */ 74 static inline void list_add_tail(struct list_head *new, struct list_head *head) 75 { 76 __list_add(new, head->prev, head); 77 } 78 79 /* 80 * Delete a list entry by making the prev/next entries 81 * point to each other. 82 * 83 * This is only for internal list manipulation where we know 84 * the prev/next entries already! 85 */ 86 static inline void __list_del(struct list_head * prev, struct list_head * next) 87 { 88 next->prev = prev; 89 prev->next = next; 90 } 91 92 /** 93 * list_del - deletes entry from list. 94 * @entry: the element to delete from the list. 95 * Note: list_empty() on entry does not return true after this, the entry is 96 * in an undefined state. 97 */ 98 #ifndef CONFIG_DEBUG_LIST 99 static inline void __list_del_entry(struct list_head *entry) 100 { 101 __list_del(entry->prev, entry->next); 102 } 103 104 static inline void list_del(struct list_head *entry) 105 { 106 __list_del(entry->prev, entry->next); 107 entry->next = LIST_POISON1; 108 entry->prev = LIST_POISON2; 109 } 110 #else 111 extern void __list_del_entry(struct list_head *entry); 112 extern void list_del(struct list_head *entry); 113 #endif 114 115 /** 116 * list_replace - replace old entry by new one 117 * @old : the element to be replaced 118 * @new : the new element to insert 119 * 120 * If @old was empty, it will be overwritten. 
121 */ 122 static inline void list_replace(struct list_head *old, 123 struct list_head *new) 124 { 125 new->next = old->next; 126 new->next->prev = new; 127 new->prev = old->prev; 128 new->prev->next = new; 129 } 130 131 static inline void list_replace_init(struct list_head *old, 132 struct list_head *new) 133 { 134 list_replace(old, new); 135 INIT_LIST_HEAD(old); 136 } 137 138 /** 139 * list_del_init - deletes entry from list and reinitialize it. 140 * @entry: the element to delete from the list. 141 */ 142 static inline void list_del_init(struct list_head *entry) 143 { 144 __list_del_entry(entry); 145 INIT_LIST_HEAD(entry); 146 } 147 148 /** 149 * list_move - delete from one list and add as another's head 150 * @list: the entry to move 151 * @head: the head that will precede our entry 152 */ 153 static inline void list_move(struct list_head *list, struct list_head *head) 154 { 155 __list_del_entry(list); 156 list_add(list, head); 157 } 158 159 /** 160 * list_move_tail - delete from one list and add as another's tail 161 * @list: the entry to move 162 * @head: the head that will follow our entry 163 */ 164 static inline void list_move_tail(struct list_head *list, 165 struct list_head *head) 166 { 167 __list_del_entry(list); 168 list_add_tail(list, head); 169 } 170 171 /** 172 * list_is_last - tests whether @list is the last entry in list @head 173 * @list: the entry to test 174 * @head: the head of the list 175 */ 176 static inline int list_is_last(const struct list_head *list, 177 const struct list_head *head) 178 { 179 return list->next == head; 180 } 181 182 /** 183 * list_empty - tests whether a list is empty 184 * @head: the list to test. 185 */ 186 static inline int list_empty(const struct list_head *head) 187 { 188 return head->next == head; 189 } 190 191 /** 192 * list_empty_careful - tests whether a list is empty and not being modified 193 * @head: the list to test 194 * 195 * Description: 196 * tests whether a list is empty _and_ checks that no other CPU might be 197 * in the process of modifying either member (next or prev) 198 * 199 * NOTE: using list_empty_careful() without synchronization 200 * can only be safe if the only activity that can happen 201 * to the list entry is list_del_init(). Eg. it cannot be used 202 * if another CPU could re-list_add() it. 203 */ 204 static inline int list_empty_careful(const struct list_head *head) 205 { 206 struct list_head *next = head->next; 207 return (next == head) && (next == head->prev); 208 } 209 210 /** 211 * list_rotate_left - rotate the list to the left 212 * @head: the head of the list 213 */ 214 static inline void list_rotate_left(struct list_head *head) 215 { 216 struct list_head *first; 217 218 if (!list_empty(head)) { 219 first = head->next; 220 list_move_tail(first, head); 221 } 222 } 223 224 /** 225 * list_is_singular - tests whether a list has just one entry. 226 * @head: the list to test. 
227 */ 228 static inline int list_is_singular(const struct list_head *head) 229 { 230 return !list_empty(head) && (head->next == head->prev); 231 } 232 233 static inline void __list_cut_position(struct list_head *list, 234 struct list_head *head, struct list_head *entry) 235 { 236 struct list_head *new_first = entry->next; 237 list->next = head->next; 238 list->next->prev = list; 239 list->prev = entry; 240 entry->next = list; 241 head->next = new_first; 242 new_first->prev = head; 243 } 244 245 /** 246 * list_cut_position - cut a list into two 247 * @list: a new list to add all removed entries 248 * @head: a list with entries 249 * @entry: an entry within head, could be the head itself 250 * and if so we won't cut the list 251 * 252 * This helper moves the initial part of @head, up to and 253 * including @entry, from @head to @list. You should 254 * pass on @entry an element you know is on @head. @list 255 * should be an empty list or a list you do not care about 256 * losing its data. 257 * 258 */ 259 static inline void list_cut_position(struct list_head *list, 260 struct list_head *head, struct list_head *entry) 261 { 262 if (list_empty(head)) 263 return; 264 if (list_is_singular(head) && 265 (head->next != entry && head != entry)) 266 return; 267 if (entry == head) 268 INIT_LIST_HEAD(list); 269 else 270 __list_cut_position(list, head, entry); 271 } 272 273 static inline void __list_splice(const struct list_head *list, 274 struct list_head *prev, 275 struct list_head *next) 276 { 277 struct list_head *first = list->next; 278 struct list_head *last = list->prev; 279 280 first->prev = prev; 281 prev->next = first; 282 283 last->next = next; 284 next->prev = last; 285 } 286 287 /** 288 * list_splice - join two lists, this is designed for stacks 289 * @list: the new list to add. 290 * @head: the place to add it in the first list. 291 */ 292 static inline void list_splice(const struct list_head *list, 293 struct list_head *head) 294 { 295 if (!list_empty(list)) 296 __list_splice(list, head, head->next); 297 } 298 299 /** 300 * list_splice_tail - join two lists, each list being a queue 301 * @list: the new list to add. 302 * @head: the place to add it in the first list. 303 */ 304 static inline void list_splice_tail(struct list_head *list, 305 struct list_head *head) 306 { 307 if (!list_empty(list)) 308 __list_splice(list, head->prev, head); 309 } 310 311 /** 312 * list_splice_init - join two lists and reinitialise the emptied list. 313 * @list: the new list to add. 314 * @head: the place to add it in the first list. 315 * 316 * The list at @list is reinitialised 317 */ 318 static inline void list_splice_init(struct list_head *list, 319 struct list_head *head) 320 { 321 if (!list_empty(list)) { 322 __list_splice(list, head, head->next); 323 INIT_LIST_HEAD(list); 324 } 325 } 326 327 /** 328 * list_splice_tail_init - join two lists and reinitialise the emptied list 329 * @list: the new list to add. 330 * @head: the place to add it in the first list. 331 * 332 * Each of the lists is a queue. 333 * The list at @list is reinitialised 334 */ 335 static inline void list_splice_tail_init(struct list_head *list, 336 struct list_head *head) 337 { 338 if (!list_empty(list)) { 339 __list_splice(list, head->prev, head); 340 INIT_LIST_HEAD(list); 341 } 342 } 343 344 /** 345 * list_entry - get the struct for this entry 346 * @ptr: the &struct list_head pointer. 347 * @type: the type of the struct this is embedded in. 348 * @member: the name of the list_struct within the struct. 
349 */ 350 #define list_entry(ptr, type, member) \ 351 container_of(ptr, type, member) 352 353 /** 354 * list_first_entry - get the first element from a list 355 * @ptr: the list head to take the element from. 356 * @type: the type of the struct this is embedded in. 357 * @member: the name of the list_struct within the struct. 358 * 359 * Note, that list is expected to be not empty. 360 */ 361 #define list_first_entry(ptr, type, member) \ 362 list_entry((ptr)->next, type, member) 363 364 /** 365 * list_last_entry - get the last element from a list 366 * @ptr: the list head to take the element from. 367 * @type: the type of the struct this is embedded in. 368 * @member: the name of the list_struct within the struct. 369 * 370 * Note, that list is expected to be not empty. 371 */ 372 #define list_last_entry(ptr, type, member) \ 373 list_entry((ptr)->prev, type, member) 374 375 /** 376 * list_first_entry_or_null - get the first element from a list 377 * @ptr: the list head to take the element from. 378 * @type: the type of the struct this is embedded in. 379 * @member: the name of the list_struct within the struct. 380 * 381 * Note that if the list is empty, it returns NULL. 382 */ 383 #define list_first_entry_or_null(ptr, type, member) \ 384 (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) 385 386 /** 387 * list_next_entry - get the next element in list 388 * @pos: the type * to cursor 389 * @member: the name of the list_struct within the struct. 390 */ 391 #define list_next_entry(pos, member) \ 392 list_entry((pos)->member.next, typeof(*(pos)), member) 393 394 /** 395 * list_prev_entry - get the prev element in list 396 * @pos: the type * to cursor 397 * @member: the name of the list_struct within the struct. 398 */ 399 #define list_prev_entry(pos, member) \ 400 list_entry((pos)->member.prev, typeof(*(pos)), member) 401 402 /** 403 * list_for_each - iterate over a list 404 * @pos: the &struct list_head to use as a loop cursor. 405 * @head: the head for your list. 406 */ 407 #define list_for_each(pos, head) \ 408 for (pos = (head)->next; pos != (head); pos = pos->next) 409 410 /** 411 * list_for_each_prev - iterate over a list backwards 412 * @pos: the &struct list_head to use as a loop cursor. 413 * @head: the head for your list. 414 */ 415 #define list_for_each_prev(pos, head) \ 416 for (pos = (head)->prev; pos != (head); pos = pos->prev) 417 418 /** 419 * list_for_each_safe - iterate over a list safe against removal of list entry 420 * @pos: the &struct list_head to use as a loop cursor. 421 * @n: another &struct list_head to use as temporary storage 422 * @head: the head for your list. 423 */ 424 #define list_for_each_safe(pos, n, head) \ 425 for (pos = (head)->next, n = pos->next; pos != (head); \ 426 pos = n, n = pos->next) 427 428 /** 429 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry 430 * @pos: the &struct list_head to use as a loop cursor. 431 * @n: another &struct list_head to use as temporary storage 432 * @head: the head for your list. 433 */ 434 #define list_for_each_prev_safe(pos, n, head) \ 435 for (pos = (head)->prev, n = pos->prev; \ 436 pos != (head); \ 437 pos = n, n = pos->prev) 438 439 /** 440 * list_for_each_entry - iterate over list of given type 441 * @pos: the type * to use as a loop cursor. 442 * @head: the head for your list. 443 * @member: the name of the list_struct within the struct. 
444 */ 445 #define list_for_each_entry(pos, head, member) \ 446 for (pos = list_first_entry(head, typeof(*pos), member); \ 447 &pos->member != (head); \ 448 pos = list_next_entry(pos, member)) 449 450 /** 451 * list_for_each_entry_reverse - iterate backwards over list of given type. 452 * @pos: the type * to use as a loop cursor. 453 * @head: the head for your list. 454 * @member: the name of the list_struct within the struct. 455 */ 456 #define list_for_each_entry_reverse(pos, head, member) \ 457 for (pos = list_last_entry(head, typeof(*pos), member); \ 458 &pos->member != (head); \ 459 pos = list_prev_entry(pos, member)) 460 461 /** 462 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() 463 * @pos: the type * to use as a start point 464 * @head: the head of the list 465 * @member: the name of the list_struct within the struct. 466 * 467 * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). 468 */ 469 #define list_prepare_entry(pos, head, member) \ 470 ((pos) ? : list_entry(head, typeof(*pos), member)) 471 472 /** 473 * list_for_each_entry_continue - continue iteration over list of given type 474 * @pos: the type * to use as a loop cursor. 475 * @head: the head for your list. 476 * @member: the name of the list_struct within the struct. 477 * 478 * Continue to iterate over list of given type, continuing after 479 * the current position. 480 */ 481 #define list_for_each_entry_continue(pos, head, member) \ 482 for (pos = list_next_entry(pos, member); \ 483 &pos->member != (head); \ 484 pos = list_next_entry(pos, member)) 485 486 /** 487 * list_for_each_entry_continue_reverse - iterate backwards from the given point 488 * @pos: the type * to use as a loop cursor. 489 * @head: the head for your list. 490 * @member: the name of the list_struct within the struct. 491 * 492 * Start to iterate over list of given type backwards, continuing after 493 * the current position. 494 */ 495 #define list_for_each_entry_continue_reverse(pos, head, member) \ 496 for (pos = list_prev_entry(pos, member); \ 497 &pos->member != (head); \ 498 pos = list_prev_entry(pos, member)) 499 500 /** 501 * list_for_each_entry_from - iterate over list of given type from the current point 502 * @pos: the type * to use as a loop cursor. 503 * @head: the head for your list. 504 * @member: the name of the list_struct within the struct. 505 * 506 * Iterate over list of given type, continuing from current position. 507 */ 508 #define list_for_each_entry_from(pos, head, member) \ 509 for (; &pos->member != (head); \ 510 pos = list_next_entry(pos, member)) 511 512 /** 513 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry 514 * @pos: the type * to use as a loop cursor. 515 * @n: another type * to use as temporary storage 516 * @head: the head for your list. 517 * @member: the name of the list_struct within the struct. 518 */ 519 #define list_for_each_entry_safe(pos, n, head, member) \ 520 for (pos = list_first_entry(head, typeof(*pos), member), \ 521 n = list_next_entry(pos, member); \ 522 &pos->member != (head); \ 523 pos = n, n = list_next_entry(n, member)) 524 525 /** 526 * list_for_each_entry_safe_continue - continue list iteration safe against removal 527 * @pos: the type * to use as a loop cursor. 528 * @n: another type * to use as temporary storage 529 * @head: the head for your list. 530 * @member: the name of the list_struct within the struct. 
531 *
532 * Iterate over list of given type, continuing after current point,
533 * safe against removal of list entry.
534 */
535 #define list_for_each_entry_safe_continue(pos, n, head, member) \
536 for (pos = list_next_entry(pos, member), \
537 n = list_next_entry(pos, member); \
538 &pos->member != (head); \
539 pos = n, n = list_next_entry(n, member))
540
541 /**
542 * list_for_each_entry_safe_from - iterate over list from current point safe against removal
543 * @pos: the type * to use as a loop cursor.
544 * @n: another type * to use as temporary storage
545 * @head: the head for your list.
546 * @member: the name of the list_struct within the struct.
547 *
548 * Iterate over list of given type from current point, safe against
549 * removal of list entry.
550 */
551 #define list_for_each_entry_safe_from(pos, n, head, member) \
552 for (n = list_next_entry(pos, member); \
553 &pos->member != (head); \
554 pos = n, n = list_next_entry(n, member))
555
556 /**
557 * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
558 * @pos: the type * to use as a loop cursor.
559 * @n: another type * to use as temporary storage
560 * @head: the head for your list.
561 * @member: the name of the list_struct within the struct.
562 *
563 * Iterate backwards over list of given type, safe against removal
564 * of list entry.
565 */
566 #define list_for_each_entry_safe_reverse(pos, n, head, member) \
567 for (pos = list_last_entry(head, typeof(*pos), member), \
568 n = list_prev_entry(pos, member); \
569 &pos->member != (head); \
570 pos = n, n = list_prev_entry(n, member))
571
572 /**
573 * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
574 * @pos: the loop cursor used in the list_for_each_entry_safe loop
575 * @n: temporary storage used in list_for_each_entry_safe
576 * @member: the name of the list_struct within the struct.
577 *
578 * list_safe_reset_next is not safe to use in general if the list may be
579 * modified concurrently (eg. the lock is dropped in the loop body). An
580 * exception to this is if the cursor element (pos) is pinned in the list,
581 * and list_safe_reset_next is called after re-taking the lock and before
582 * completing the current iteration of the loop body.
583 */
584 #define list_safe_reset_next(pos, n, member) \
585 n = list_next_entry(pos, member)
586
587 /*
588 * Double linked lists with a single pointer list head.
589 * Mostly useful for hash tables where the two pointer list head is
590 * too wasteful.
591 * You lose the ability to access the tail in O(1).
592 */
593
594 #define HLIST_HEAD_INIT { .first = NULL }
595 #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
596 #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
597 static inline void INIT_HLIST_NODE(struct hlist_node *h)
598 {
599 h->next = NULL;
600 h->pprev = NULL;
601 }
602
603 static inline int hlist_unhashed(const struct hlist_node *h)
604 {
605 return !h->pprev;
606 }
607
608 static inline int hlist_empty(const struct hlist_head *h)
609 {
610 return !h->first;
611 }
612
613 static inline void __hlist_del(struct hlist_node *n)
614 {
615 struct hlist_node *next = n->next;
616 struct hlist_node **pprev = n->pprev;
617 *pprev = next;
618 if (next)
619 next->pprev = pprev;
620 }
621
622 static inline void hlist_del(struct hlist_node *n)
623 {
624 __hlist_del(n);
625 n->next = LIST_POISON1;
626 n->pprev = LIST_POISON2;
627 }
628
629 static inline void hlist_del_init(struct hlist_node *n)
630 {
631 if (!hlist_unhashed(n)) {
632 __hlist_del(n);
633 INIT_HLIST_NODE(n);
634 }
635 }
636
637 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
638 {
639 struct hlist_node *first = h->first;
640 n->next = first;
641 if (first)
642 first->pprev = &n->next;
643 h->first = n;
644 n->pprev = &h->first;
645 }
646
647 /* next must be != NULL */
648 static inline void hlist_add_before(struct hlist_node *n,
649 struct hlist_node *next)
650 {
651 n->pprev = next->pprev;
652 n->next = next;
653 next->pprev = &n->next;
654 *(n->pprev) = n;
655 }
656
657 static inline void hlist_add_after(struct hlist_node *n,
658 struct hlist_node *next)
659 {
660 next->next = n->next;
661 n->next = next;
662 next->pprev = &n->next;
663
664 if(next->next)
665 next->next->pprev = &next->next;
666 }
667
668 /* after that we'll appear to be on some hlist and hlist_del will work */
669 static inline void hlist_add_fake(struct hlist_node *n)
670 {
671 n->pprev = &n->next;
672 }
673
674 /*
675 * Move a list from one list head to another. Fixup the pprev
676 * reference of the first entry if it exists.
677 */
678 static inline void hlist_move_list(struct hlist_head *old,
679 struct hlist_head *new)
680 {
681 new->first = old->first;
682 if (new->first)
683 new->first->pprev = &new->first;
684 old->first = NULL;
685 }
686
687 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
688
689 #define hlist_for_each(pos, head) \
690 for (pos = (head)->first; pos ; pos = pos->next)
691
692 #define hlist_for_each_safe(pos, n, head) \
693 for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
694 pos = n)
695
696 #define hlist_entry_safe(ptr, type, member) \
697 ({ typeof(ptr) ____ptr = (ptr); \
698 ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
699 })
700
701 /**
702 * hlist_for_each_entry - iterate over list of given type
703 * @pos: the type * to use as a loop cursor.
704 * @head: the head for your list.
705 * @member: the name of the hlist_node within the struct.
706 */
707 #define hlist_for_each_entry(pos, head, member) \
708 for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
709 pos; \
710 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
711
712 /**
713 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
714 * @pos: the type * to use as a loop cursor.
715 * @member: the name of the hlist_node within the struct.
716 */
717 #define hlist_for_each_entry_continue(pos, member) \
718 for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
719 pos; \
720 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
721
722 /**
723 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
724 * @pos: the type * to use as a loop cursor.
725 * @member: the name of the hlist_node within the struct.
726 */
727 #define hlist_for_each_entry_from(pos, member) \
728 for (; pos; \
729 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
730
731 /**
732 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
733 * @pos: the type * to use as a loop cursor.
734 * @n: another &struct hlist_node to use as temporary storage
735 * @head: the head for your list.
736 * @member: the name of the hlist_node within the struct.
737 */
738 #define hlist_for_each_entry_safe(pos, n, head, member) \
739 for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
740 pos && ({ n = pos->member.next; 1; }); \
741 pos = hlist_entry_safe(n, typeof(*pos), member))
742
743 #endif
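As an illustration of how the "safe" hlist iterator above is meant to be used, here is a minimal sketch; it is not part of the sources shown in this report, and the struct and function names (my_item, remove_matching) are hypothetical. The temporary cursor n caches the next node, so the current entry may be unlinked and freed inside the loop body:

/* Hypothetical element type embedding an hlist_node. */
struct my_item {
	int key;
	struct hlist_node node;
};

/* Remove every entry with a matching key from one hash bucket.
 * hlist_del() inside the loop is safe only with the _safe variant. */
static void remove_matching(struct hlist_head *bucket, int key)
{
	struct my_item *pos;
	struct hlist_node *n;

	hlist_for_each_entry_safe(pos, n, bucket, node) {
		if (pos->key == key) {
			hlist_del(&pos->node);
			kfree(pos);
		}
	}
}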
1 /*
2 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
3 *
4 * (C) SGI 2006, Christoph Lameter
5 * Cleaned up and restructured to ease the addition of alternative
6 * implementations of SLAB allocators.
7 * (C) Linux Foundation 2008-2013
8 * Unified interface for all slab allocators
9 */
10
11 #ifndef _LINUX_SLAB_H
12 #define _LINUX_SLAB_H
13
14 #include <linux/gfp.h>
15 #include <linux/types.h>
16 #include <linux/workqueue.h>
17
18
19 /*
20 * Flags to pass to kmem_cache_create().
21 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
22 */
23 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
24 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
25 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
26 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
27 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
28 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
29 #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
30 /*
31 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
32 *
33 * This delays freeing the SLAB page by a grace period, it does _NOT_
34 * delay object freeing. This means that if you do kmem_cache_free()
35 * that memory location is free to be reused at any time. Thus it may
36 * be possible to see another object there in the same RCU grace period.
37 *
38 * This feature only ensures the memory location backing the object
39 * stays valid, the trick to using this is relying on an independent
40 * object validation pass. Something like:
41 *
42 * rcu_read_lock()
43 * again:
44 * obj = lockless_lookup(key);
45 * if (obj) {
46 * if (!try_get_ref(obj)) // might fail for free objects
47 * goto again;
48 *
49 * if (obj->key != key) { // not the object we expected
50 * put_ref(obj);
51 * goto again;
52 * }
53 * }
54 * rcu_read_unlock();
55 *
56 * This is useful if we need to approach a kernel structure obliquely,
57 * from its address obtained without the usual locking. We can lock
58 * the structure to stabilize it and check it's still at the given address,
59 * only if we can be sure that the memory has not been meanwhile reused
60 * for some other kind of object (which our subsystem's lock might corrupt).
61 *
62 * rcu_read_lock before reading the address, then rcu_read_unlock after
63 * taking the spinlock within the structure expected at that address.
64 */
65 #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
66 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
67 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
68
69 /* Flag to prevent checks on free */
70 #ifdef CONFIG_DEBUG_OBJECTS
71 # define SLAB_DEBUG_OBJECTS 0x00400000UL
72 #else
73 # define SLAB_DEBUG_OBJECTS 0x00000000UL
74 #endif
75
76 #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
77
78 /* Don't track use of uninitialized memory */
79 #ifdef CONFIG_KMEMCHECK
80 # define SLAB_NOTRACK 0x01000000UL
81 #else
82 # define SLAB_NOTRACK 0x00000000UL
83 #endif
84 #ifdef CONFIG_FAILSLAB
85 # define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */
86 #else
87 # define SLAB_FAILSLAB 0x00000000UL
88 #endif
89
90 /* The following flags affect the page allocator grouping pages by mobility */
91 #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
92 #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
93 /*
94 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
95 *
96 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
97 *
98 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
99 * Both make kfree a no-op.
100 */
101 #define ZERO_SIZE_PTR ((void *)16)
102
103 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
104 (unsigned long)ZERO_SIZE_PTR)
105
106 #include <linux/kmemleak.h>
107
108 struct mem_cgroup;
109 /*
110 * struct kmem_cache related prototypes
111 */
112 void __init kmem_cache_init(void);
113 int slab_is_available(void);
114
115 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
116 unsigned long,
117 void (*)(void *));
118 struct kmem_cache *
119 kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
120 unsigned long, void (*)(void *), struct kmem_cache *);
121 void kmem_cache_destroy(struct kmem_cache *);
122 int kmem_cache_shrink(struct kmem_cache *);
123 void kmem_cache_free(struct kmem_cache *, void *);
124
125 /*
126 * Please use this macro to create slab caches. Simply specify the
127 * name of the structure and maybe some flags that are listed above.
128 *
129 * The alignment of the struct determines object alignment. If you
130 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
131 * then the objects will be properly aligned in SMP configurations.
132 */
133 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
134 sizeof(struct __struct), __alignof__(struct __struct),\
135 (__flags), NULL)
136
137 /*
138 * Common kmalloc functions provided by all allocators
139 */
140 void * __must_check __krealloc(const void *, size_t, gfp_t);
141 void * __must_check krealloc(const void *, size_t, gfp_t);
142 void kfree(const void *);
143 void kzfree(const void *);
144 size_t ksize(const void *);
145
146 /*
147 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
148 * alignment larger than the alignment of a 64-bit integer.
149 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
150 */
151 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
152 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
153 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
154 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
155 #else
156 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
157 #endif
158
159 #ifdef CONFIG_SLOB
160 /*
161 * Common fields provided in kmem_cache by all slab allocators
162 * This struct is either used directly by the allocator (SLOB)
163 * or the allocator must include definitions for all fields
164 * provided in kmem_cache_common in their definition of kmem_cache.
165 *
166 * Once we can do anonymous structs (C11 standard) we could put a
167 * anonymous struct definition in these allocators so that the
168 * separate allocations in the kmem_cache structure of SLAB and
169 * SLUB is no longer needed.
170 */
171 struct kmem_cache {
172 unsigned int object_size;/* The original size of the object */
173 unsigned int size; /* The aligned/padded/added on size */
174 unsigned int align; /* Alignment as calculated */
175 unsigned long flags; /* Active flags on the slab */
176 const char *name; /* Slab name for sysfs */
177 int refcount; /* Use counter */
178 void (*ctor)(void *); /* Called on object slot creation */
179 struct list_head list; /* List of all slab caches on the system */
180 };
181
182 #endif /* CONFIG_SLOB */
183
184 /*
185 * Kmalloc array related definitions
186 */
187
188 #ifdef CONFIG_SLAB
189 /*
190 * The largest kmalloc size supported by the SLAB allocators is
191 * 32 megabyte (2^25) or the maximum allocatable page order if that is
192 * less than 32 MB.
193 *
194 * WARNING: Its not easy to increase this value since the allocators have
195 * to do various tricks to work around compiler limitations in order to
196 * ensure proper constant folding.
197 */
198 #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
199 (MAX_ORDER + PAGE_SHIFT - 1) : 25)
200 #define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
201 #ifndef KMALLOC_SHIFT_LOW
202 #define KMALLOC_SHIFT_LOW 5
203 #endif
204 #endif
205
206 #ifdef CONFIG_SLUB
207 /*
208 * SLUB directly allocates requests fitting in to an order-1 page
209 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
210 */
211 #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
212 #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
213 #ifndef KMALLOC_SHIFT_LOW
214 #define KMALLOC_SHIFT_LOW 3
215 #endif
216 #endif
217
218 #ifdef CONFIG_SLOB
219 /*
220 * SLOB passes all requests larger than one page to the page allocator.
221 * No kmalloc array is necessary since objects of different sizes can
222 * be allocated from the same page.
223 */
224 #define KMALLOC_SHIFT_HIGH PAGE_SHIFT
225 #define KMALLOC_SHIFT_MAX 30
226 #ifndef KMALLOC_SHIFT_LOW
227 #define KMALLOC_SHIFT_LOW 3
228 #endif
229 #endif
230
231 /* Maximum allocatable size */
232 #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
233 /* Maximum size for which we actually use a slab cache */
234 #define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
235 /* Maximum order allocatable via the slab allocagtor */
236 #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
237
238 /*
239 * Kmalloc subsystem.
240 */
241 #ifndef KMALLOC_MIN_SIZE
242 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
243 #endif
244
245 #ifndef CONFIG_SLOB
246 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
247 #ifdef CONFIG_ZONE_DMA
248 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
249 #endif
250
251 /*
252 * Figure out which kmalloc slab an allocation of a certain size
253 * belongs to.
254 * 0 = zero alloc
255 * 1 = 65 .. 96 bytes
256 * 2 = 120 .. 192 bytes
257 * n = 2^(n-1) .. 2^n -1
258 */
259 static __always_inline int kmalloc_index(size_t size)
260 {
261 if (!size)
262 return 0;
263
264 if (size <= KMALLOC_MIN_SIZE)
265 return KMALLOC_SHIFT_LOW;
266
267 if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
268 return 1;
269 if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
270 return 2;
271 if (size <= 8) return 3;
272 if (size <= 16) return 4;
273 if (size <= 32) return 5;
274 if (size <= 64) return 6;
275 if (size <= 128) return 7;
276 if (size <= 256) return 8;
277 if (size <= 512) return 9;
278 if (size <= 1024) return 10;
279 if (size <= 2 * 1024) return 11;
280 if (size <= 4 * 1024) return 12;
281 if (size <= 8 * 1024) return 13;
282 if (size <= 16 * 1024) return 14;
283 if (size <= 32 * 1024) return 15;
284 if (size <= 64 * 1024) return 16;
285 if (size <= 128 * 1024) return 17;
286 if (size <= 256 * 1024) return 18;
287 if (size <= 512 * 1024) return 19;
288 if (size <= 1024 * 1024) return 20;
289 if (size <= 2 * 1024 * 1024) return 21;
290 if (size <= 4 * 1024 * 1024) return 22;
291 if (size <= 8 * 1024 * 1024) return 23;
292 if (size <= 16 * 1024 * 1024) return 24;
293 if (size <= 32 * 1024 * 1024) return 25;
294 if (size <= 64 * 1024 * 1024) return 26;
295 BUG();
296
297 /* Will never be reached. Needed because the compiler may complain */
298 return -1;
299 }
300 #endif /* !CONFIG_SLOB */
301
302 void *__kmalloc(size_t size, gfp_t flags);
303 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
304
305 #ifdef CONFIG_NUMA
306 void *__kmalloc_node(size_t size, gfp_t flags, int node);
307 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
308 #else
309 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
310 {
311 return __kmalloc(size, flags);
312 }
313
314 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
315 {
316 return kmem_cache_alloc(s, flags);
317 }
318 #endif
319
320 #ifdef CONFIG_TRACING
321 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
322
323 #ifdef CONFIG_NUMA
324 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
325 gfp_t gfpflags,
326 int node, size_t size);
327 #else
328 static __always_inline void *
329 kmem_cache_alloc_node_trace(struct kmem_cache *s,
330 gfp_t gfpflags,
331 int node, size_t size)
332 {
333 return kmem_cache_alloc_trace(s, gfpflags, size);
334 }
335 #endif /* CONFIG_NUMA */
336
337 #else /* CONFIG_TRACING */
338 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
339 gfp_t flags, size_t size)
340 {
341 return kmem_cache_alloc(s, flags);
342 }
343
344 static __always_inline void *
345 kmem_cache_alloc_node_trace(struct kmem_cache *s,
346 gfp_t gfpflags,
347 int node, size_t size)
348 {
349 return kmem_cache_alloc_node(s, gfpflags, node);
350 }
351 #endif /* CONFIG_TRACING */
352
353 #ifdef CONFIG_SLAB
354 #include <linux/slab_def.h>
355 #endif
356
357 #ifdef CONFIG_SLUB
358 #include <linux/slub_def.h>
359 #endif
360
361 static __always_inline void *
362 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
363 {
364 void *ret;
365
366 flags |= (__GFP_COMP | __GFP_KMEMCG);
367 ret = (void *) __get_free_pages(flags, order);
368 kmemleak_alloc(ret, size, 1, flags);
369 return ret;
370 }
371
372 #ifdef CONFIG_TRACING
373 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
374 #else
375 static __always_inline void *
376 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
377 {
378 return kmalloc_order(size, flags, order);
379 }
380 #endif
381
382 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
383 {
384 unsigned int order = get_order(size);
385 return kmalloc_order_trace(size, flags, order);
386 }
387
388 /**
389 * kmalloc - allocate memory
390 * @size: how many bytes of memory are required.
391 * @flags: the type of memory to allocate.
392 *
393 * kmalloc is the normal method of allocating memory
394 * for objects smaller than page size in the kernel.
395 *
396 * The @flags argument may be one of:
397 *
398 * %GFP_USER - Allocate memory on behalf of user. May sleep.
399 *
400 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
401 *
402 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
403 * For example, use this inside interrupt handlers.
404 *
405 * %GFP_HIGHUSER - Allocate pages from high memory.
406 *
407 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
408 *
409 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
410 *
411 * %GFP_NOWAIT - Allocation will not sleep.
412 *
413 * %__GFP_THISNODE - Allocate node-local memory only.
414 *
415 * %GFP_DMA - Allocation suitable for DMA.
416 * Should only be used for kmalloc() caches. Otherwise, use a
417 * slab created with SLAB_DMA.
418 *
419 * Also it is possible to set different flags by OR'ing
420 * in one or more of the following additional @flags:
421 *
422 * %__GFP_COLD - Request cache-cold pages instead of
423 * trying to return cache-warm pages.
424 *
425 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
426 *
427 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
428 * (think twice before using).
429 *
430 * %__GFP_NORETRY - If memory is not immediately available,
431 * then give up at once.
432 *
433 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
434 *
435 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
436 *
437 * There are other flags available as well, but these are not intended
438 * for general use, and so are not documented here. For a full list of
439 * potential flags, always refer to linux/gfp.h.
440 */
441 static __always_inline void *kmalloc(size_t size, gfp_t flags)
442 {
443 if (__builtin_constant_p(size)) {
444 if (size > KMALLOC_MAX_CACHE_SIZE)
445 return kmalloc_large(size, flags);
446 #ifndef CONFIG_SLOB
447 if (!(flags & GFP_DMA)) {
448 int index = kmalloc_index(size);
449
450 if (!index)
451 return ZERO_SIZE_PTR;
452
453 return kmem_cache_alloc_trace(kmalloc_caches[index],
454 flags, size);
455 }
456 #endif
457 }
458 return __kmalloc(size, flags);
459 }
460
461 /*
462 * Determine size used for the nth kmalloc cache.
463 * return size or 0 if a kmalloc cache for that
464 * size does not exist
465 */
466 static __always_inline int kmalloc_size(int n)
467 {
468 #ifndef CONFIG_SLOB
469 if (n > 2)
470 return 1 << n;
471
472 if (n == 1 && KMALLOC_MIN_SIZE <= 32)
473 return 96;
474
475 if (n == 2 && KMALLOC_MIN_SIZE <= 64)
476 return 192;
477 #endif
478 return 0;
479 }
480
481 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
482 {
483 #ifndef CONFIG_SLOB
484 if (__builtin_constant_p(size) &&
485 size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
486 int i = kmalloc_index(size);
487
488 if (!i)
489 return ZERO_SIZE_PTR;
490
491 return kmem_cache_alloc_node_trace(kmalloc_caches[i],
492 flags, node, size);
493 }
494 #endif
495 return __kmalloc_node(size, flags, node);
496 }
497
498 /*
499 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
500 * Intended for arches that get misalignment faults even for 64 bit integer
501 * aligned buffers.
502 */
503 #ifndef ARCH_SLAB_MINALIGN
504 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
505 #endif
506 /*
507 * This is the main placeholder for memcg-related information in kmem caches.
508 * struct kmem_cache will hold a pointer to it, so the memory cost while
509 * disabled is 1 pointer. The runtime cost while enabled, gets bigger than it
510 * would otherwise be if that would be bundled in kmem_cache: we'll need an
511 * extra pointer chase. But the trade off clearly lays in favor of not
512 * penalizing non-users.
513 *
514 * Both the root cache and the child caches will have it. For the root cache,
515 * this will hold a dynamically allocated array large enough to hold
516 * information about the currently limited memcgs in the system. To allow the
517 * array to be accessed without taking any locks, on relocation we free the old
518 * version only after a grace period.
519 *
520 * Child caches will hold extra metadata needed for its operation. Fields are:
521 *
522 * @memcg: pointer to the memcg this cache belongs to
523 * @list: list_head for the list of all caches in this memcg
524 * @root_cache: pointer to the global, root cache, this cache was derived from
525 * @dead: set to true after the memcg dies; the cache may still be around.
526 * @nr_pages: number of pages that belongs to this cache.
527 * @destroy: worker to be called whenever we are ready, or believe we may be
528 * ready, to destroy this cache.
529 */
530 struct memcg_cache_params {
531 bool is_root_cache;
532 union {
533 struct {
534 struct rcu_head rcu_head;
535 struct kmem_cache *memcg_caches[0];
536 };
537 struct {
538 struct mem_cgroup *memcg;
539 struct list_head list;
540 struct kmem_cache *root_cache;
541 bool dead;
542 atomic_t nr_pages;
543 struct work_struct destroy;
544 };
545 };
546 };
547
548 int memcg_update_all_caches(int num_memcgs);
549
550 struct seq_file;
551 int cache_show(struct kmem_cache *s, struct seq_file *m);
552 void print_slabinfo_header(struct seq_file *m);
553
554 /**
555 * kmalloc_array - allocate memory for an array.
556 * @n: number of elements.
557 * @size: element size.
558 * @flags: the type of memory to allocate (see kmalloc).
559 */
560 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
561 {
562 if (size != 0 && n > SIZE_MAX / size)
563 return NULL;
564 return __kmalloc(n * size, flags);
565 }
566
567 /**
568 * kcalloc - allocate memory for an array. The memory is set to zero.
569 * @n: number of elements.
570 * @size: element size.
571 * @flags: the type of memory to allocate (see kmalloc).
572 */
573 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
574 {
575 return kmalloc_array(n, size, flags | __GFP_ZERO);
576 }
577
578 /*
579 * kmalloc_track_caller is a special version of kmalloc that records the
580 * calling function of the routine calling it for slab leak tracking instead
581 * of just the calling function (confusing, eh?).
582 * It's useful when the call to kmalloc comes from a widely-used standard
583 * allocator where we care about the real place the memory allocation
584 * request comes from.
585 */
586 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
587 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
588 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
589 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
590 #define kmalloc_track_caller(size, flags) \
591 __kmalloc_track_caller(size, flags, _RET_IP_)
592 #else
593 #define kmalloc_track_caller(size, flags) \
594 __kmalloc(size, flags)
595 #endif /* DEBUG_SLAB */
596
597 #ifdef CONFIG_NUMA
598 /*
599 * kmalloc_node_track_caller is a special version of kmalloc_node that
600 * records the calling function of the routine calling it for slab leak
601 * tracking instead of just the calling function (confusing, eh?).
602 * It's useful when the call to kmalloc_node comes from a widely-used
603 * standard allocator where we care about the real place the memory
604 * allocation request comes from.
605 */
606 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
607 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
608 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
609 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
610 #define kmalloc_node_track_caller(size, flags, node) \
611 __kmalloc_node_track_caller(size, flags, node, \
612 _RET_IP_)
613 #else
614 #define kmalloc_node_track_caller(size, flags, node) \
615 __kmalloc_node(size, flags, node)
616 #endif
617
618 #else /* CONFIG_NUMA */
619
620 #define kmalloc_node_track_caller(size, flags, node) \
621 kmalloc_track_caller(size, flags)
622
623 #endif /* CONFIG_NUMA */
624
625 /*
626 * Shortcuts
627 */
628 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
629 {
630 return kmem_cache_alloc(k, flags | __GFP_ZERO);
631 }
632
633 /**
634 * kzalloc - allocate memory. The memory is set to zero.
635 * @size: how many bytes of memory are required.
636 * @flags: the type of memory to allocate (see kmalloc).
637 */
638 static inline void *kzalloc(size_t size, gfp_t flags)
639 {
640 return kmalloc(size, flags | __GFP_ZERO);
641 }
642
643 /**
644 * kzalloc_node - allocate zeroed memory from a particular memory node.
645 * @size: how many bytes of memory are required.
646 * @flags: the type of memory to allocate (see kmalloc).
647 * @node: memory node from which to allocate
648 */
649 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
650 {
651 return kmalloc_node(size, flags | __GFP_ZERO, node);
652 }
653
654 /*
655 * Determine the size of a slab object
656 */
657 static inline unsigned int kmem_cache_size(struct kmem_cache *s)
658 {
659 return s->object_size;
660 }
661
662 void __init kmem_cache_init_late(void);
663
664 #endif /* _LINUX_SLAB_H */
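To illustrate the constant-size fast path implemented by kmalloc() above: when the requested size is a compile-time constant no larger than KMALLOC_MAX_CACHE_SIZE, the call is routed through kmalloc_index() to a fixed kmalloc cache; a variable size falls back to the generic __kmalloc() path. A minimal sketch of typical usage follows; the struct and function names (my_record, make_record) are hypothetical, not taken from the driver in this report:

/* Hypothetical record type allocated from the kmalloc caches. */
struct my_record {
	int id;
	char name[32];
};

static struct my_record *make_record(gfp_t gfp)
{
	/* sizeof(*r) is a compile-time constant, so this resolves to
	 * kmem_cache_alloc_trace(kmalloc_caches[kmalloc_index(...)], ...)
	 * rather than __kmalloc(). Passing __GFP_ZERO makes the call
	 * equivalent to kzalloc(). */
	struct my_record *r = kmalloc(sizeof(*r), gfp | __GFP_ZERO);

	if (!r)
		return NULL;
	r->id = -1;
	return r;
}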
1 #ifndef __LINUX_SPINLOCK_H
2 #define __LINUX_SPINLOCK_H
3
4 /*
5 * include/linux/spinlock.h - generic spinlock/rwlock declarations
6 *
7 * here's the role of the various spinlock/rwlock related include files:
8 *
9 * on SMP builds:
10 *
11 * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
12 * initializers
13 *
14 * linux/spinlock_types.h:
15 * defines the generic type and initializers
16 *
17 * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
18 * implementations, mostly inline assembly code
19 *
20 * (also included on UP-debug builds:)
21 *
22 * linux/spinlock_api_smp.h:
23 * contains the prototypes for the _spin_*() APIs.
24 *
25 * linux/spinlock.h: builds the final spin_*() APIs.
26 *
27 * on UP builds:
28 *
29 * linux/spinlock_type_up.h:
30 * contains the generic, simplified UP spinlock type.
31 * (which is an empty structure on non-debug builds)
32 *
33 * linux/spinlock_types.h:
34 * defines the generic type and initializers
35 *
36 * linux/spinlock_up.h:
37 * contains the arch_spin_*()/etc. version of UP
38 * builds. (which are NOPs on non-debug, non-preempt
39 * builds)
40 *
41 * (included on UP-non-debug builds:)
42 *
43 * linux/spinlock_api_up.h:
44 * builds the _spin_*() APIs.
45 *
46 * linux/spinlock.h: builds the final spin_*() APIs.
47 */
48
49 #include <linux/typecheck.h>
50 #include <linux/preempt.h>
51 #include <linux/linkage.h>
52 #include <linux/compiler.h>
53 #include <linux/irqflags.h>
54 #include <linux/thread_info.h>
55 #include <linux/kernel.h>
56 #include <linux/stringify.h>
57 #include <linux/bottom_half.h>
58 #include <asm/barrier.h>
59
60
61 /*
62 * Must define these before including other files, inline functions need them
63 */
64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
65
66 #define LOCK_SECTION_START(extra) \
67 ".subsection 1\n\t" \
68 extra \
69 ".ifndef " LOCK_SECTION_NAME "\n\t" \
70 LOCK_SECTION_NAME ":\n\t" \
71 ".endif\n"
72
73 #define LOCK_SECTION_END \
74 ".previous\n\t"
75
76 #define __lockfunc __attribute__((section(".spinlock.text")))
77
78 /*
79 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
80 */
81 #include <linux/spinlock_types.h>
82
83 /*
84 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
85 */
86 #ifdef CONFIG_SMP
87 # include <asm/spinlock.h>
88 #else
89 # include <linux/spinlock_up.h>
90 #endif
91
92 #ifdef CONFIG_DEBUG_SPINLOCK
93 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
94 struct lock_class_key *key);
95 # define raw_spin_lock_init(lock) \
96 do { \
97 static struct lock_class_key __key; \
98 \
99 __raw_spin_lock_init((lock), #lock, &__key); \
100 } while (0)
101
102 #else
103 # define raw_spin_lock_init(lock) \
104 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
105 #endif
106
107 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
108
109 #ifdef CONFIG_GENERIC_LOCKBREAK
110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
111 #else
112
113 #ifdef arch_spin_is_contended
114 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
115 #else
116 #define raw_spin_is_contended(lock) (((void)(lock), 0))
117 #endif /*arch_spin_is_contended*/
118 #endif
119
120 /*
121 * Despite its name it doesn't necessarily has to be a full barrier.
122 * It should only guarantee that a STORE before the critical section
123 * can not be reordered with a LOAD inside this section.
124 * spin_lock() is the one-way barrier, this LOAD can not escape out
125 * of the region. So the default implementation simply ensures that
126 * a STORE can not move into the critical section, smp_wmb() should
127 * serialize it with another STORE done by spin_lock().
128 */
129 #ifndef smp_mb__before_spinlock
130 #define smp_mb__before_spinlock() smp_wmb()
131 #endif
132
133 /*
134 * Place this after a lock-acquisition primitive to guarantee that
135 * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
136 * if the UNLOCK and LOCK are executed by the same CPU or if the
137 * UNLOCK and LOCK operate on the same lock variable.
138 */
139 #ifndef smp_mb__after_unlock_lock
140 #define smp_mb__after_unlock_lock() do { } while (0)
141 #endif
142
143 /**
144 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
145 * @lock: the spinlock in question.
146 */
147 #define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
148
149 #ifdef CONFIG_DEBUG_SPINLOCK
150 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
151 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
152 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
153 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
154 #else
155 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
156 {
157 __acquire(lock);
158 arch_spin_lock(&lock->raw_lock);
159 }
160
161 static inline void
162 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
163 {
164 __acquire(lock);
165 arch_spin_lock_flags(&lock->raw_lock, *flags);
166 }
167
168 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
169 {
170 return arch_spin_trylock(&(lock)->raw_lock);
171 }
172
173 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
174 {
175 arch_spin_unlock(&lock->raw_lock);
176 __release(lock);
177 }
178 #endif
179
180 /*
181 * Define the various spin_lock methods. Note we define these
182 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
183 * various methods are defined as nops in the case they are not
184 * required.
185 */
186 #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
187
188 #define raw_spin_lock(lock) _raw_spin_lock(lock)
189
190 #ifdef CONFIG_DEBUG_LOCK_ALLOC
191 # define raw_spin_lock_nested(lock, subclass) \
192 _raw_spin_lock_nested(lock, subclass)
193
194 # define raw_spin_lock_nest_lock(lock, nest_lock) \
195 do { \
196 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
197 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
198 } while (0)
199 #else
200 # define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
201 # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
202 #endif
203
204 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
205
206 #define raw_spin_lock_irqsave(lock, flags) \
207 do { \
208 typecheck(unsigned long, flags); \
209 flags = _raw_spin_lock_irqsave(lock); \
210 } while (0)
211
212 #ifdef CONFIG_DEBUG_LOCK_ALLOC
213 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
214 do { \
215 typecheck(unsigned long, flags); \
216 flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
217 } while (0)
218 #else
219 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
220 do { \
221 typecheck(unsigned long, flags); \
222 flags = _raw_spin_lock_irqsave(lock); \
223 } while (0)
224 #endif
225
226 #else
227
228 #define raw_spin_lock_irqsave(lock, flags) \
229 do { \
230 typecheck(unsigned long, flags); \
231 _raw_spin_lock_irqsave(lock, flags); \
232 } while (0)
233
234 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
235 raw_spin_lock_irqsave(lock, flags)
236
237 #endif
238
239 #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
240 #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
241 #define raw_spin_unlock(lock) _raw_spin_unlock(lock)
242 #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
243
244 #define raw_spin_unlock_irqrestore(lock, flags) \
245 do { \
246 typecheck(unsigned long, flags); \
247 _raw_spin_unlock_irqrestore(lock, flags); \
248 } while (0)
249 #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
250
251 #define raw_spin_trylock_bh(lock) \
252 __cond_lock(lock, _raw_spin_trylock_bh(lock))
253
254 #define raw_spin_trylock_irq(lock) \
255 ({ \
256 local_irq_disable(); \
257 raw_spin_trylock(lock) ? \
258 1 : ({ local_irq_enable(); 0; }); \
259 })
260
261 #define raw_spin_trylock_irqsave(lock, flags) \
262 ({ \
263 local_irq_save(flags); \
264 raw_spin_trylock(lock) ? \
265 1 : ({ local_irq_restore(flags); 0; }); \
266 })
267
268 /**
269 * raw_spin_can_lock - would raw_spin_trylock() succeed?
270 * @lock: the spinlock in question.
271 */
272 #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
273
274 /* Include rwlock functions */
275 #include <linux/rwlock.h>
276
277 /*
278 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
279 */
280 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
281 # include <linux/spinlock_api_smp.h>
282 #else
283 # include <linux/spinlock_api_up.h>
284 #endif
285
286 /*
287 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
288 */
289
290 static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
291 {
292 return &lock->rlock;
293 }
294
295 #define spin_lock_init(_lock) \
296 do { \
297 spinlock_check(_lock); \
298 raw_spin_lock_init(&(_lock)->rlock); \
299 } while (0)
300
301 static inline void spin_lock(spinlock_t *lock)
302 {
303 raw_spin_lock(&lock->rlock);
304 }
305
306 static inline void spin_lock_bh(spinlock_t *lock)
307 {
308 raw_spin_lock_bh(&lock->rlock);
309 }
310
311 static inline int spin_trylock(spinlock_t *lock)
312 {
313 return raw_spin_trylock(&lock->rlock);
314 }
315
316 #define spin_lock_nested(lock, subclass) \
317 do { \
318 raw_spin_lock_nested(spinlock_check(lock), subclass); \
319 } while (0)
320
321 #define spin_lock_nest_lock(lock, nest_lock) \
322 do { \
323 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
324 } while (0)
325
326 static inline void spin_lock_irq(spinlock_t *lock)
327 {
328 raw_spin_lock_irq(&lock->rlock);
329 }
330
331 #define spin_lock_irqsave(lock, flags) \
332 do { \
333 raw_spin_lock_irqsave(spinlock_check(lock), flags); \
334 } while (0)
335
336 #define spin_lock_irqsave_nested(lock, flags, subclass) \
337 do { \
338 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
339 } while (0)
340
341 static inline void spin_unlock(spinlock_t *lock)
342 {
343 raw_spin_unlock(&lock->rlock);
344 }
345
346 static inline void spin_unlock_bh(spinlock_t *lock)
347 {
348 raw_spin_unlock_bh(&lock->rlock);
349 }
350
351 static inline void spin_unlock_irq(spinlock_t *lock)
352 {
353 raw_spin_unlock_irq(&lock->rlock);
354 }
355
356 static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
357 {
358 raw_spin_unlock_irqrestore(&lock->rlock, flags);
359 }
360
361 static inline int spin_trylock_bh(spinlock_t *lock)
362 {
363 return raw_spin_trylock_bh(&lock->rlock);
364 }
365
366 static inline int spin_trylock_irq(spinlock_t *lock)
367 {
368 return raw_spin_trylock_irq(&lock->rlock);
369 }
370
371 #define spin_trylock_irqsave(lock, flags) \
372 ({ \
373 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
374 })
375
376 static inline void spin_unlock_wait(spinlock_t *lock)
377 {
378 raw_spin_unlock_wait(&lock->rlock);
379 }
380
381 static inline int spin_is_locked(spinlock_t *lock)
382 {
383 return raw_spin_is_locked(&lock->rlock);
384 }
385
386 static inline int spin_is_contended(spinlock_t *lock)
387 {
388 return raw_spin_is_contended(&lock->rlock);
389 }
390
391 static inline int spin_can_lock(spinlock_t *lock)
392 {
393 return raw_spin_can_lock(&lock->rlock);
394 }
395
396 #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
397
398 /*
399 * Pull the atomic_t declaration:
400 * (asm-mips/atomic.h needs above definitions)
401 */
402 #include <linux/atomic.h>
403 /**
404 * atomic_dec_and_lock - lock on reaching reference count zero
405 * @atomic: the atomic counter
406 * @lock: the spinlock in question
407 *
408 * Decrements @atomic by 1. If the result is 0, returns true and locks
409 * @lock. Returns false for all other cases.
410 */
411 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
412 #define atomic_dec_and_lock(atomic, lock) \
413 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
414
415 #endif /* __LINUX_SPINLOCK_H */
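Since the rules checked by LDV frequently concern lock discipline, a short sketch of the canonical spin_lock_irqsave()/spin_unlock_irqrestore() pattern defined above may be useful; the counter structure and function names here are hypothetical, not taken from the driver under analysis:

/* Hypothetical structure whose counter is protected by a spinlock
 * (initialized elsewhere with spin_lock_init(&c->lock)). */
struct my_counter {
	spinlock_t lock;
	unsigned long value;
};

static void counter_inc(struct my_counter *c)
{
	unsigned long flags;

	/* flags must be an unsigned long; the typecheck() inside
	 * raw_spin_lock_irqsave() enforces this at compile time.
	 * Interrupts are disabled and their previous state saved,
	 * so this is safe even against the same lock being taken
	 * from an interrupt handler. */
	spin_lock_irqsave(&c->lock, flags);
	c->value++;
	spin_unlock_irqrestore(&c->lock, flags);
}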
1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because of some static verifiers (like
9 BLAST) don't accept multiple error labels through a program. */
10 static inline void ldv_error(void)
11 {
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop, that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21 LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return nondeterministic negative integer number. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 int ret = ldv_undef_int();
33
34 ldv_assume(ret < 0);
35
36 return ret;
37 }
38 /* Return nondeterministic nonpositive integer number. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 int ret = ldv_undef_int();
42
43 ldv_assume(ret <= 0);
44
45 return ret;
46 }
47
48 /* Add explicit model for __builin_expect GCC function. Without the model a
49 return value will be treated as nondetermined by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62 ldv_assert(0);
63 }
64
65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */
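To show how these primitives are typically combined in a rule model, here is a hedged, purely illustrative sketch; it is not the actual 331_1a model, and the state variable and function names (ldv_lock_state, ldv_model_lock, ldv_model_alloc) are invented for the example. ldv_assert() marks states the rule forbids, while ldv_assume() prunes paths the verifier should not explore:

/* Illustrative model state: tracks whether a lock is currently held. */
static int ldv_lock_state = 0;

void ldv_model_lock(void)
{
	/* Rule violation if the lock is acquired twice. */
	ldv_assert(ldv_lock_state == 0);
	ldv_lock_state = 1;
}

void ldv_model_unlock(void)
{
	/* Rule violation if an unheld lock is released. */
	ldv_assert(ldv_lock_state == 1);
	ldv_lock_state = 0;
}

/* Nondeterministic allocation stub: the verifier explores both the
 * failure (NULL) and success outcomes. */
void *ldv_model_alloc(void)
{
	void *p = ldv_undef_ptr();

	/* Exclude the special "bad pointer" value from success paths. */
	ldv_assume(p != (void *)LDV_PTR_MAX);
	return p;
}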

Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.

Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there is an error in your driver.

The Error trace column contains a path on which the given rule is violated. You can expand or collapse entire entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand or collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking one opens the corresponding line in the source code.

The Source code column contains the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking it displays that file's contents.

Kernel:              linux-3.14.1.tar.xz
Module:              drivers/usb/host/whci/whci-hcd.ko
Rule:                331_1a
Verifier:            CPAchecker
Verdict:             Bug
Status:              Fixed
Creation time:       2015-11-21 00:44:09
Problem description: L0211

Comment

Reported: 21 Nov 2015

[To the top]