Bug
[В начало]
Ошибка # 81
Показать/спрятать трассу ошибок Error trace
{ 20 typedef unsigned char __u8; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 30 typedef unsigned long long __u64; 15 typedef signed char s8; 16 typedef unsigned char u8; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 229 struct kernel_symbol { unsigned long value; const char *name; } ; 33 struct module ; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 102 typedef __s32 int32_t; 108 typedef __u32 uint32_t; 157 typedef unsigned int gfp_t; 159 typedef unsigned int oom_flags_t; 162 typedef u64 phys_addr_t; 167 typedef phys_addr_t resource_size_t; 173 typedef unsigned long irq_hw_number_t; 177 struct __anonstruct_atomic_t_6 { int counter; } ; 177 typedef struct __anonstruct_atomic_t_6 atomic_t; 182 struct __anonstruct_atomic64_t_7 { long counter; } ; 182 typedef struct __anonstruct_atomic64_t_7 atomic64_t; 183 struct list_head { struct list_head *next; struct list_head *prev; } ; 188 struct hlist_node ; 188 struct hlist_head { struct hlist_node *first; } ; 192 struct hlist_node { struct hlist_node *next; struct 
hlist_node **pprev; } ; 203 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 67 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ; 66 struct __anonstruct____missing_field_name_9 { unsigned int a; unsigned int b; } ; 66 struct __anonstruct____missing_field_name_10 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 66 union __anonunion____missing_field_name_8 { struct __anonstruct____missing_field_name_9 __annonCompField4; struct __anonstruct____missing_field_name_10 __annonCompField5; } ; 66 struct desc_struct { union __anonunion____missing_field_name_8 __annonCompField6; } ; 15 typedef unsigned long pgdval_t; 16 typedef unsigned long pgprotval_t; 20 struct pgprot { pgprotval_t pgprot; } ; 218 typedef struct pgprot pgprot_t; 220 struct __anonstruct_pgd_t_12 { pgdval_t pgd; } ; 220 typedef struct __anonstruct_pgd_t_12 pgd_t; 361 struct page ; 361 typedef struct page *pgtable_t; 372 struct file ; 385 struct seq_file ; 423 struct thread_struct ; 425 struct mm_struct ; 426 struct task_struct ; 427 struct cpumask ; 20 struct qspinlock { atomic_t val; } ; 33 typedef struct qspinlock arch_spinlock_t; 131 typedef void (*ctor_fn_t)(); 234 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ; 48 struct device ; 432 struct completion ; 555 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short 
line; unsigned short flags; } ; 102 struct timespec ; 103 struct compat_timespec ; 104 struct __anonstruct_futex_16 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ; 104 struct __anonstruct_nanosleep_17 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ; 104 struct pollfd ; 104 struct __anonstruct_poll_18 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ; 104 union __anonunion____missing_field_name_15 { struct __anonstruct_futex_16 futex; struct __anonstruct_nanosleep_17 nanosleep; struct __anonstruct_poll_18 poll; } ; 104 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_15 __annonCompField7; } ; 127 struct kernel_vm86_regs { struct pt_regs pt; unsigned short es; unsigned short __esh; unsigned short ds; unsigned short __dsh; unsigned short fs; unsigned short __fsh; unsigned short gs; unsigned short __gsh; } ; 79 union __anonunion____missing_field_name_19 { struct pt_regs *regs; struct kernel_vm86_regs *vm86; } ; 79 struct math_emu_info { long ___orig_eip; union __anonunion____missing_field_name_19 __annonCompField8; } ; 328 struct cpumask { unsigned long bits[128U]; } ; 15 typedef struct cpumask cpumask_t; 652 typedef struct cpumask *cpumask_var_t; 260 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 26 struct __anonstruct____missing_field_name_29 { u64 rip; u64 rdp; } ; 26 struct __anonstruct____missing_field_name_30 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 26 union __anonunion____missing_field_name_28 { struct __anonstruct____missing_field_name_29 __annonCompField12; struct __anonstruct____missing_field_name_30 __annonCompField13; } ; 26 union __anonunion____missing_field_name_31 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union 
__anonunion____missing_field_name_28 __annonCompField14; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_31 __annonCompField15; } ; 66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ; 155 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ; 161 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 __reserved[464U]; } ; 179 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; } ; 193 struct fpu { union fpregs_state state; unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; } ; 170 struct seq_operations ; 369 struct perf_event ; 370 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fs; unsigned long gs; struct fpu fpu; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; } ; 23 typedef atomic64_t atomic_long_t; 33 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 28 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct list_head hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head 
locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 205 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ; 546 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct____missing_field_name_35 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion____missing_field_name_34 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_35 __annonCompField17; } ; 33 struct spinlock { union __anonunion____missing_field_name_34 __annonCompField18; } ; 76 typedef struct spinlock spinlock_t; 135 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ; 52 typedef struct seqcount seqcount_t; 404 struct __anonstruct_seqlock_t_45 { struct seqcount seqcount; spinlock_t lock; } ; 404 typedef struct __anonstruct_seqlock_t_45 seqlock_t; 598 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 83 struct user_namespace ; 22 struct __anonstruct_kuid_t_46 { uid_t val; } ; 22 typedef struct __anonstruct_kuid_t_46 kuid_t; 27 struct __anonstruct_kgid_t_47 { gid_t val; } ; 27 typedef struct __anonstruct_kgid_t_47 kgid_t; 36 struct vm_area_struct ; 38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 43 typedef struct __wait_queue_head wait_queue_head_t; 95 struct __anonstruct_nodemask_t_48 { unsigned long 
bits[16U]; } ; 95 typedef struct __anonstruct_nodemask_t_48 nodemask_t; 13 struct optimistic_spin_queue { atomic_t tail; } ; 39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ; 67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ; 177 struct rw_semaphore ; 178 struct rw_semaphore { long count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ; 172 struct completion { unsigned int done; wait_queue_head_t wait; } ; 437 union ktime { s64 tv64; } ; 41 typedef union ktime ktime_t; 1121 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int slack; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ; 238 struct hrtimer ; 239 enum hrtimer_restart ; 240 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 41 struct rb_root { struct rb_node *rb_node; } ; 838 struct nsproxy ; 259 struct workqueue_struct ; 260 struct work_struct ; 54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 64 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; struct resource *parent; struct resource *sibling; struct resource *child; } ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int 
(*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 335 struct wakeup_source ; 336 struct wake_irq ; 338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; } ; 553 struct dev_pm_qos ; 553 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool ignore_children; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int 
runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ; 615 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ; 24 struct __anonstruct_mm_context_t_115 { void *ldt; int size; unsigned short ia32_compat; struct mutex lock; void *vdso; atomic_t perf_rdpmc_allowed; } ; 24 typedef struct __anonstruct_mm_context_t_115 mm_context_t; 1281 struct llist_node ; 64 struct llist_node { struct llist_node *next; } ; 37 struct cred ; 58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 73 struct __anonstruct____missing_field_name_148 { struct arch_uprobe_task autask; unsigned long vaddr; } ; 73 struct __anonstruct____missing_field_name_149 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 73 union __anonunion____missing_field_name_147 { struct __anonstruct____missing_field_name_148 __annonCompField33; struct __anonstruct____missing_field_name_149 __annonCompField34; } ; 73 struct uprobe ; 73 struct return_instance ; 73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_147 __annonCompField35; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 94 struct xol_area ; 95 struct uprobes_state { struct xol_area *xol_area; } ; 133 struct address_space ; 134 struct mem_cgroup ; 31 typedef void compound_page_dtor(struct page *); 32 union __anonunion____missing_field_name_150 { struct address_space *mapping; void *s_mem; } ; 32 
union __anonunion____missing_field_name_152 { unsigned long index; void *freelist; bool pfmemalloc; } ; 32 struct __anonstruct____missing_field_name_156 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 32 union __anonunion____missing_field_name_155 { atomic_t _mapcount; struct __anonstruct____missing_field_name_156 __annonCompField38; int units; } ; 32 struct __anonstruct____missing_field_name_154 { union __anonunion____missing_field_name_155 __annonCompField39; atomic_t _count; } ; 32 union __anonunion____missing_field_name_153 { unsigned long counters; struct __anonstruct____missing_field_name_154 __annonCompField40; unsigned int active; } ; 32 struct __anonstruct____missing_field_name_151 { union __anonunion____missing_field_name_152 __annonCompField37; union __anonunion____missing_field_name_153 __annonCompField41; } ; 32 struct __anonstruct____missing_field_name_158 { struct page *next; int pages; int pobjects; } ; 32 struct slab ; 32 struct __anonstruct____missing_field_name_159 { compound_page_dtor *compound_dtor; unsigned long compound_order; } ; 32 union __anonunion____missing_field_name_157 { struct list_head lru; struct __anonstruct____missing_field_name_158 __annonCompField43; struct slab *slab_page; struct callback_head callback_head; struct __anonstruct____missing_field_name_159 __annonCompField44; pgtable_t pmd_huge_pte; } ; 32 struct kmem_cache ; 32 union __anonunion____missing_field_name_160 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; struct page *first_page; } ; 32 struct page { unsigned long flags; union __anonunion____missing_field_name_150 __annonCompField36; struct __anonstruct____missing_field_name_151 __annonCompField42; union __anonunion____missing_field_name_157 __annonCompField45; union __anonunion____missing_field_name_160 __annonCompField46; struct mem_cgroup *mem_cgroup; } ; 181 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 266 struct __anonstruct_shared_161 { 
struct rb_node rb; unsigned long rb_subtree_last; } ; 266 struct anon_vma ; 266 struct vm_operations_struct ; 266 struct mempolicy ; 266 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_161 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; } ; 334 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 340 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 353 struct task_rss_stat { int events; int count[3U]; } ; 361 struct mm_rss_stat { atomic_long_t count[3U]; } ; 366 struct kioctx_table ; 367 struct linux_binfmt ; 367 struct mmu_notifier_mm ; 367 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long shared_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; 
unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 53 union __anonunion____missing_field_name_166 { unsigned long bitmap[4U]; struct callback_head callback_head; } ; 53 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_166 __annonCompField47; } ; 41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ; 124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ; 153 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ; 189 struct kernfs_open_node ; 190 struct kernfs_iattrs ; 213 struct kernfs_root ; 213 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 85 struct kernfs_node ; 85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 89 struct kernfs_ops ; 89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ; 96 union __anonunion____missing_field_name_171 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct 
kernfs_elem_attr attr; } ; 96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_171 __annonCompField48; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ; 155 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 171 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ; 188 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 477 struct sock ; 478 struct kobject ; 479 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 485 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 59 
struct bin_attribute ; 60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 82 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 155 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 509 struct kref { atomic_t refcount; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 114 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 122 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 130 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 147 struct kset { struct list_head list; spinlock_t list_lock; struct 
kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 222 struct kernel_param ; 227 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 62 struct kparam_string ; 62 struct kparam_array ; 62 union __anonunion____missing_field_name_172 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_172 __annonCompField49; } ; 83 struct kparam_string { unsigned int maxlen; char *string; } ; 89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ; 469 struct latch_tree_node { struct rb_node node[2U]; } ; 211 struct mod_arch_specific { } ; 37 struct module_param_attrs ; 37 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 47 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 73 struct exception_table_entry ; 206 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 213 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ; 220 struct module_sect_attrs ; 220 struct module_notes_attrs ; 220 struct tracepoint ; 220 struct trace_event_call ; 220 struct trace_enum_map ; 220 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute 
*modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); void *module_init; void *module_core; unsigned int init_size; unsigned int core_size; unsigned int init_text_size; unsigned int core_text_size; struct mod_tree_node mtn_core; struct mod_tree_node mtn_init; unsigned int init_ro_size; unsigned int core_ro_size; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; Elf64_Sym *symtab; Elf64_Sym *core_symtab; unsigned int num_symtab; unsigned int core_num_syms; char *strtab; char *core_strtab; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp_alive; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 13 typedef unsigned long kernel_ulong_t; 186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t 
driver_data; } ; 219 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 474 struct platform_device_id { char name[20U]; kernel_ulong_t driver_data; } ; 650 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_PDATA = 3 } ; 657 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ; 32 typedef u32 phandle; 34 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ; 44 struct device_node { const char *name; const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; } ; 65 struct of_phandle_args { struct device_node *np; int args_count; uint32_t args[16U]; } ; 44 struct irq_desc ; 45 struct irq_data ; 13 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; 16 typedef enum irqreturn irqreturn_t; 30 struct msi_msg ; 31 enum irqchip_irq_state ; 62 struct msi_desc ; 63 struct irq_domain ; 64 struct irq_common_data { unsigned int state_use_accessors; } ; 136 struct irq_chip ; 136 struct irq_data { u32 mask; unsigned int irq; unsigned long hwirq; unsigned int node; struct irq_common_data *common; struct irq_chip *chip; struct irq_domain *domain; struct irq_data *parent_data; void *handler_data; void *chip_data; struct msi_desc *msi_desc; cpumask_var_t affinity; } ; 305 struct irq_chip { const char *name; unsigned int (*irq_startup)(struct irq_data *); void (*irq_shutdown)(struct irq_data *); void (*irq_enable)(struct irq_data *); void (*irq_disable)(struct irq_data *); void (*irq_ack)(struct irq_data *); void (*irq_mask)(struct irq_data *); void (*irq_mask_ack)(struct irq_data *); void (*irq_unmask)(struct irq_data *); void (*irq_eoi)(struct 
irq_data *); int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool ); int (*irq_retrigger)(struct irq_data *); int (*irq_set_type)(struct irq_data *, unsigned int); int (*irq_set_wake)(struct irq_data *, unsigned int); void (*irq_bus_lock)(struct irq_data *); void (*irq_bus_sync_unlock)(struct irq_data *); void (*irq_cpu_online)(struct irq_data *); void (*irq_cpu_offline)(struct irq_data *); void (*irq_suspend)(struct irq_data *); void (*irq_resume)(struct irq_data *); void (*irq_pm_shutdown)(struct irq_data *); void (*irq_calc_mask)(struct irq_data *); void (*irq_print_chip)(struct irq_data *, struct seq_file *); int (*irq_request_resources)(struct irq_data *); void (*irq_release_resources)(struct irq_data *); void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *); void (*irq_write_msi_msg)(struct irq_data *, struct msi_msg *); int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state , bool *); int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state , bool ); int (*irq_set_vcpu_affinity)(struct irq_data *, void *); unsigned long flags; } ; 397 struct irq_affinity_notify ; 398 struct proc_dir_entry ; 399 struct irqaction ; 399 struct irq_desc { struct irq_common_data irq_common_data; struct irq_data irq_data; unsigned int *kstat_irqs; void (*handle_irq)(unsigned int, struct irq_desc *); struct irqaction *action; unsigned int status_use_accessors; unsigned int core_internal_state__do_not_mess_with_it; unsigned int depth; unsigned int wake_depth; unsigned int irq_count; unsigned long last_unhandled; unsigned int irqs_unhandled; atomic_t threads_handled; int threads_handled_last; raw_spinlock_t lock; struct cpumask *percpu_enabled; const struct cpumask *affinity_hint; struct irq_affinity_notify *affinity_notify; cpumask_var_t pending_mask; unsigned long threads_oneshot; atomic_t threads_active; wait_queue_head_t wait_for_threads; unsigned int nr_actions; unsigned int no_suspend_depth; unsigned int 
cond_suspend_depth; unsigned int force_resume_depth; struct proc_dir_entry *dir; int parent_irq; struct module *owner; const char *name; } ; 62 struct exception_table_entry { int insn; int fixup; } ; 696 struct irq_chip_regs { unsigned long enable; unsigned long disable; unsigned long mask; unsigned long ack; unsigned long eoi; unsigned long type; unsigned long polarity; } ; 735 struct irq_chip_type { struct irq_chip chip; struct irq_chip_regs regs; void (*handler)(unsigned int, struct irq_desc *); u32 type; u32 mask_cache_priv; u32 *mask_cache; } ; 757 struct irq_chip_generic { raw_spinlock_t lock; void *reg_base; u32 (*reg_readl)(void *); void (*reg_writel)(u32 , void *); unsigned int irq_base; unsigned int irq_cnt; u32 mask_cache; u32 type_cache; u32 polarity_cache; u32 wake_enabled; u32 wake_active; unsigned int num_ct; void *private; unsigned long installed; unsigned long unused; struct irq_domain *domain; struct list_head list; struct irq_chip_type chip_types[0U]; } ; 805 enum irq_gc_flags { IRQ_GC_INIT_MASK_CACHE = 1, IRQ_GC_INIT_NESTED_LOCK = 2, IRQ_GC_MASK_CACHE_PER_TYPE = 4, IRQ_GC_NO_MASK = 8, IRQ_GC_BE_IO = 16 } ; 813 struct irq_domain_chip_generic { unsigned int irqs_per_chip; unsigned int num_chips; unsigned int irq_flags_to_clear; unsigned int irq_flags_to_set; enum irq_gc_flags gc_flags; struct irq_chip_generic *gc[0U]; } ; 58 struct __anonstruct____missing_field_name_191 { struct radix_tree_node *parent; void *private_data; } ; 58 union __anonunion____missing_field_name_190 { struct __anonstruct____missing_field_name_191 __annonCompField57; struct callback_head callback_head; } ; 58 struct radix_tree_node { unsigned int path; unsigned int count; union __anonunion____missing_field_name_190 __annonCompField58; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ; 105 struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 428 struct irq_domain_ops { int (*match)(struct irq_domain *, 
struct device_node *); int (*map)(struct irq_domain *, unsigned int, irq_hw_number_t ); void (*unmap)(struct irq_domain *, unsigned int); int (*xlate)(struct irq_domain *, struct device_node *, const u32 *, unsigned int, unsigned long *, unsigned int *); int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *); void (*free)(struct irq_domain *, unsigned int, unsigned int); void (*activate)(struct irq_domain *, struct irq_data *); void (*deactivate)(struct irq_domain *, struct irq_data *); } ; 83 struct irq_domain { struct list_head link; const char *name; const struct irq_domain_ops *ops; void *host_data; unsigned int flags; struct device_node *of_node; struct irq_domain_chip_generic *gc; struct irq_domain *parent; irq_hw_number_t hwirq_max; unsigned int revmap_direct_max_irq; unsigned int revmap_size; struct radix_tree_root revmap_tree; unsigned int linear_revmap[]; } ; 308 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; struct user_namespace *user_ns; void *private; } ; 35 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ; 192 struct gpio_chip ; 178 struct gpio_desc ; 179 struct gpio_chip { const char *label; struct device *dev; struct device *cdev; struct module *owner; struct list_head list; int (*request)(struct gpio_chip *, unsigned int); void (*free)(struct gpio_chip *, unsigned int); int (*get_direction)(struct gpio_chip *, unsigned int); int (*direction_input)(struct gpio_chip *, unsigned int); int (*direction_output)(struct gpio_chip *, unsigned int, int); int (*get)(struct gpio_chip *, unsigned int); void (*set)(struct gpio_chip *, unsigned int, int); void (*set_multiple)(struct gpio_chip *, unsigned long *, unsigned long *); int 
(*set_debounce)(struct gpio_chip *, unsigned int, unsigned int); int (*to_irq)(struct gpio_chip *, unsigned int); void (*dbg_show)(struct seq_file *, struct gpio_chip *); int base; u16 ngpio; struct gpio_desc *desc; const const char **names; bool can_sleep; bool irq_not_threaded; struct irq_chip *irqchip; struct irq_domain *irqdomain; unsigned int irq_base; void (*irq_handler)(unsigned int, struct irq_desc *); unsigned int irq_default_type; int irq_parent; struct device_node *of_node; int of_gpio_n_cells; int (*of_xlate)(struct gpio_chip *, const struct of_phandle_args *, u32 *); struct list_head pin_ranges; } ; 153 struct klist_node ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 67 struct pinctrl ; 68 struct pinctrl_state ; 194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ; 48 struct dma_map_ops ; 48 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ; 11 struct pdev_archdata { } ; 14 struct device_private ; 15 struct device_driver ; 16 struct driver_private ; 17 struct class ; 18 struct subsys_private ; 19 struct bus_type ; 20 struct iommu_ops ; 21 struct iommu_group ; 61 struct device_attribute ; 61 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key 
lock_key; } ; 139 struct device_type ; 197 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; 203 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ; 353 struct class_attribute ; 353 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ; 446 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ; 514 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ; 542 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ; 674 struct 
device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ; 683 struct dma_coherent_mem ; 683 struct cma ; 683 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct dev_pin_info *pins; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ; 829 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ; 292 struct mfd_cell ; 293 struct platform_device { const char *name; int id; bool id_auto; struct device dev; u32 num_resources; struct resource *resource; const struct platform_device_id *id_entry; char *driver_override; struct mfd_cell *mfd_cell; struct pdev_archdata archdata; } ; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ; 4 typedef 
unsigned long cputime_t; 25 struct sem_undo_list ; 25 struct sysv_sem { struct sem_undo_list *undo_list; } ; 78 struct user_struct ; 26 struct sysv_shm { struct list_head shm_clist; } ; 24 struct __anonstruct_sigset_t_195 { unsigned long sig[1U]; } ; 24 typedef struct __anonstruct_sigset_t_195 sigset_t; 25 struct siginfo ; 17 typedef void __signalfn_t(int); 18 typedef __signalfn_t *__sighandler_t; 20 typedef void __restorefn_t(); 21 typedef __restorefn_t *__sigrestore_t; 34 union sigval { int sival_int; void *sival_ptr; } ; 10 typedef union sigval sigval_t; 11 struct __anonstruct__kill_197 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ; 11 struct __anonstruct__timer_198 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ; 11 struct __anonstruct__rt_199 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ; 11 struct __anonstruct__sigchld_200 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ; 11 struct __anonstruct__addr_bnd_202 { void *_lower; void *_upper; } ; 11 struct __anonstruct__sigfault_201 { void *_addr; short _addr_lsb; struct __anonstruct__addr_bnd_202 _addr_bnd; } ; 11 struct __anonstruct__sigpoll_203 { long _band; int _fd; } ; 11 struct __anonstruct__sigsys_204 { void *_call_addr; int _syscall; unsigned int _arch; } ; 11 union __anonunion__sifields_196 { int _pad[28U]; struct __anonstruct__kill_197 _kill; struct __anonstruct__timer_198 _timer; struct __anonstruct__rt_199 _rt; struct __anonstruct__sigchld_200 _sigchld; struct __anonstruct__sigfault_201 _sigfault; struct __anonstruct__sigpoll_203 _sigpoll; struct __anonstruct__sigsys_204 _sigsys; } ; 11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_196 _sifields; } ; 113 typedef struct siginfo siginfo_t; 22 struct sigpending { struct list_head list; sigset_t signal; } ; 243 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; 
__sigrestore_t sa_restorer; sigset_t sa_mask; } ; 257 struct k_sigaction { struct sigaction sa; } ; 450 struct pid_namespace ; 450 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 53 struct seccomp_filter ; 54 struct seccomp { int mode; struct seccomp_filter *filter; } ; 40 struct rt_mutex_waiter ; 41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ; 11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; int start_pid; void *start_site; char start_comm[16U]; } ; 123 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ; 156 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 466 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ; 45 struct latency_record { unsigned long backtrace[12U]; unsigned 
int count; unsigned long time; unsigned long max; } ; 39 struct assoc_array_ptr ; 39 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ; 31 typedef int32_t key_serial_t; 34 typedef uint32_t key_perm_t; 35 struct key ; 36 struct signal_struct ; 37 struct key_type ; 41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ; 123 union __anonunion____missing_field_name_211 { struct list_head graveyard_link; struct rb_node serial_node; } ; 123 struct key_user ; 123 union __anonunion____missing_field_name_212 { time_t expiry; time_t revoked_at; } ; 123 struct __anonstruct____missing_field_name_214 { struct key_type *type; char *description; } ; 123 union __anonunion____missing_field_name_213 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_214 __annonCompField61; } ; 123 union __anonunion_type_data_215 { struct list_head link; unsigned long x[2U]; void *p[2U]; int reject_error; } ; 123 union __anonunion_payload_217 { unsigned long value; void *rcudata; void *data; void *data2[2U]; } ; 123 union __anonunion____missing_field_name_216 { union __anonunion_payload_217 payload; struct assoc_array keys; } ; 123 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_211 __annonCompField59; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_212 __annonCompField60; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_213 __annonCompField62; union __anonunion_type_data_215 type_data; union __anonunion____missing_field_name_216 __annonCompField63; } ; 358 struct audit_context ; 27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ; 90 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int 
magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ; 369 struct percpu_ref ; 55 typedef void percpu_ref_func_t(struct percpu_ref *); 68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ; 27 struct cgroup ; 28 struct cgroup_root ; 29 struct cgroup_subsys ; 30 struct cgroup_taskset ; 72 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; struct callback_head callback_head; struct work_struct destroy_work; } ; 124 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[12U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[12U]; struct callback_head callback_head; } ; 197 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int populated_cnt; struct kernfs_node *kn; struct kernfs_node *procs_kn; struct kernfs_node *populated_kn; unsigned int subtree_control; unsigned int child_subsys_mask; struct cgroup_subsys_state *subsys[12U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[12U]; struct list_head 
pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; } ; 270 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ; 306 struct cftype { char name[64U]; int private; umode_t mode; size_t max_write_len; unsigned int flags; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ; 388 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); void (*css_e_css_changed)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*fork)(struct task_struct *); void (*exit)(struct cgroup_subsys_state *, struct cgroup_subsys_state *, struct task_struct *); void (*bind)(struct cgroup_subsys_state *); int disabled; int early_init; bool broken_hierarchy; bool 
warned_broken_hierarchy; int id; const char *name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ; 128 struct futex_pi_state ; 129 struct robust_list_head ; 130 struct bio_list ; 131 struct fs_struct ; 132 struct perf_event_context ; 133 struct blk_plug ; 135 struct nameidata ; 188 struct cfs_rq ; 189 struct task_group ; 477 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ; 516 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ; 524 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ; 531 struct cputime { cputime_t utime; cputime_t stime; } ; 543 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ; 563 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ; 584 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; int running; } ; 620 struct autogroup ; 621 struct tty_struct ; 621 struct taskstats ; 621 struct tty_audit_buf ; 621 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; 
struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; unsigned int audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; oom_flags_t oom_flags; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ; 790 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ; 833 struct backing_dev_info ; 834 struct reclaim_state ; 835 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ; 849 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ; 897 struct wake_q_node { struct wake_q_node *next; } ; 1126 struct io_context ; 1160 struct pipe_inode_info ; 1162 struct load_weight { unsigned long weight; u32 inv_weight; } ; 1169 struct sched_avg { u64 last_runnable_update; s64 decay_count; unsigned long load_avg_contrib; unsigned long utilization_avg_contrib; u32 runnable_avg_sum; u32 avg_period; 
u32 running_avg_sum; } ; 1194 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ; 1229 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ; 1261 struct rt_rq ; 1261 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ; 1277 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_new; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ; 1343 struct memcg_oom_info { struct mem_cgroup *memcg; gfp_t gfp_mask; int order; unsigned char may_oom; } ; 1769 struct sched_class ; 1769 struct files_struct ; 1769 struct compat_robust_list_head ; 1769 struct numa_group ; 1769 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; struct task_struct *last_wakee; unsigned long wakee_flips; unsigned long wakee_flip_decay_ts; int wake_cpu; 
int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char in_execve; unsigned char in_iowait; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long 
last_switch_count; struct thread_struct thread; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; int (*notifier)(void *); void *notifier_data; sigset_t *notifier_mask; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; 
struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; struct memcg_oom_info memcg_oom; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; } ; 26 struct bgpio_chip { struct gpio_chip gc; unsigned long int (*read_reg)(void *); void (*write_reg)(void *, unsigned long); void *reg_dat; void *reg_set; void *reg_clr; void *reg_dir; int bits; unsigned long int (*pin2mask)(struct bgpio_chip *, unsigned int); spinlock_t lock; unsigned long data; unsigned long dir; } ; 93 struct irqaction { irqreturn_t (*handler)(int, void *); void *dev_id; void *percpu_dev_id; struct irqaction *next; irqreturn_t (*thread_fn)(int, void *); struct task_struct *thread; unsigned int irq; unsigned int flags; unsigned long thread_flags; unsigned long thread_mask; const char *name; struct proc_dir_entry *dir; } ; 201 struct irq_affinity_notify { unsigned int irq; struct kref kref; struct work_struct work; void (*notify)(struct irq_affinity_notify *, const cpumask_t *); void 
(*release)(struct kref *); } ; 363 enum irqchip_irq_state { IRQCHIP_STATE_PENDING = 0, IRQCHIP_STATE_ACTIVE = 1, IRQCHIP_STATE_MASKED = 2, IRQCHIP_STATE_LINE_LEVEL = 3 } ; 672 struct grgpio_uirq { u8 refcnt; u8 uirq; } ; 56 struct grgpio_lirq { s8 index; u8 irq; } ; 65 struct grgpio_priv { struct bgpio_chip bgc; void *regs; struct device *dev; u32 imask; struct irq_domain *domain; struct grgpio_uirq uirqs[32U]; struct grgpio_lirq lirqs[32U]; } ; 1 long int __builtin_expect(long, long); 33 extern struct module __this_module; 53 void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...); 3 bool ldv_is_err(const void *ptr); 6 long int ldv_ptr_err(const void *ptr); 71 void warn_slowpath_null(const char *, const int); 32 long int PTR_ERR(const void *ptr); 41 bool IS_ERR(const void *ptr); 45 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long); 406 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags); 438 void ldv_spin_unlock_irqrestore_31(spinlock_t *lock, unsigned long flags); 11 void __ldv_spin_lock(spinlock_t *); 14 void ldv___ldv_spin_lock_11(spinlock_t *ldv_func_arg1); 18 void ldv___ldv_spin_lock_19(spinlock_t *ldv_func_arg1); 22 void ldv___ldv_spin_lock_21(spinlock_t *ldv_func_arg1); 26 void ldv___ldv_spin_lock_23(spinlock_t *ldv_func_arg1); 30 void ldv___ldv_spin_lock_25(spinlock_t *ldv_func_arg1); 34 void ldv___ldv_spin_lock_30(spinlock_t *ldv_func_arg1); 38 void ldv___ldv_spin_lock_32(spinlock_t *ldv_func_arg1); 42 void ldv___ldv_spin_lock_34(spinlock_t *ldv_func_arg1); 46 void ldv___ldv_spin_lock_36(spinlock_t *ldv_func_arg1); 50 void ldv___ldv_spin_lock_39(spinlock_t *ldv_func_arg1); 54 void ldv___ldv_spin_lock_41(spinlock_t *ldv_func_arg1); 70 void ldv_spin_lock_lock(); 78 void ldv_spin_lock_lock_of_NOT_ARG_SIGN(); 86 void ldv_spin_lock_lock_of_bgpio_chip(); 87 void ldv_spin_unlock_lock_of_bgpio_chip(); 94 void ldv_spin_lock_node_size_lock_of_pglist_data(); 102 void ldv_spin_lock_siglock_of_sighand_struct(); 
86 const char * kobject_name(const struct kobject *kobj); 287 int of_property_read_u32_array(const struct device_node *, const char *, u32 *, size_t ); 311 const void * of_get_property(const struct device_node *, const char *, int *); 856 int of_property_read_u32(const struct device_node *np, const char *propname, u32 *out_value); 144 int generic_handle_irq(unsigned int); 466 void handle_simple_irq(unsigned int, struct irq_desc *); 505 void irq_set_chip_and_handler_name(unsigned int, struct irq_chip *, void (*)(unsigned int, struct irq_desc *), const char *); 508 void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip, void (*handle)(unsigned int, struct irq_desc *)); 546 void irq_modify_status(unsigned int, unsigned long, unsigned long); 553 void irq_clear_status_flags(unsigned int irq, unsigned long clr); 558 void irq_set_noprobe(unsigned int irq); 596 int irq_set_chip_data(unsigned int, void *); 620 void * irq_data_get_irq_chip_data(struct irq_data *d); 149 struct irq_domain * __irq_domain_add(struct device_node *, int, irq_hw_number_t , int, const struct irq_domain_ops *, void *); 174 struct irq_domain * irq_domain_add_linear(struct device_node *of_node, unsigned int size, const struct irq_domain_ops *ops, void *host_data); 203 void irq_domain_remove(struct irq_domain *); 213 unsigned int irq_create_mapping(struct irq_domain *, irq_hw_number_t ); 156 int gpiochip_add(struct gpio_chip *); 157 void gpiochip_remove(struct gpio_chip *); 639 void * devm_kmalloc(struct device *, size_t , gfp_t ); 644 void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp); 669 void * devm_ioremap_resource(struct device *, struct resource *); 832 const char * dev_name(const struct device *dev); 863 void * dev_get_drvdata(const struct device *dev); 868 void dev_set_drvdata(struct device *dev, void *data); 1084 void dev_err(const struct device *, const char *, ...); 1086 void dev_warn(const struct device *, const char *, ...); 1090 void _dev_info(const struct 
device *, const char *, ...); 51 struct resource * platform_get_resource(struct platform_device *, unsigned int, unsigned int); 53 int platform_get_irq(struct platform_device *, unsigned int); 205 void * platform_get_drvdata(const struct platform_device *pdev); 210 void platform_set_drvdata(struct platform_device *pdev, void *data); 62 struct bgpio_chip * to_bgpio_chip(struct gpio_chip *gc); 68 int bgpio_init(struct bgpio_chip *, struct device *, unsigned long, void *, void *, void *, void *, void *, unsigned long); 127 int request_threaded_irq(unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *); 132 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *), unsigned long flags, const char *name, void *dev); 146 void free_irq(unsigned int, void *); 96 struct grgpio_priv * grgpio_gc_to_priv(struct gpio_chip *gc); 103 void grgpio_set_imask(struct grgpio_priv *priv, unsigned int offset, int val); 121 int grgpio_to_irq(struct gpio_chip *gc, unsigned int offset); 136 int grgpio_irq_set_type(struct irq_data *d, unsigned int type); 180 void grgpio_irq_mask(struct irq_data *d); 188 void grgpio_irq_unmask(struct irq_data *d); 196 struct irq_chip grgpio_irq_chip = { "grgpio", 0, 0, 0, 0, 0, &grgpio_irq_mask, 0, &grgpio_irq_unmask, 0, 0, 0, &grgpio_irq_set_type, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0UL }; 203 irqreturn_t grgpio_irq_handler(int irq, void *dev); 239 int grgpio_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq); 295 void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq); 336 const struct irq_domain_ops grgpio_irq_domain_ops = { 0, &grgpio_irq_map, &grgpio_irq_unmap, 0, 0, 0, 0, 0 }; 343 int grgpio_probe(struct platform_device *ofdev); 456 int grgpio_remove(struct platform_device *ofdev); 491 const struct of_device_id __mod_of__grgpio_match_device_table[3U] = { }; 523 void ldv_check_final_state(); 526 void ldv_check_return_value(int); 529 void 
ldv_check_return_value_probe(int); 532 void ldv_initialize(); 535 void ldv_handler_precall(); 538 int nondet_int(); 541 int LDV_IN_INTERRUPT = 0; 544 void ldv_main0_sequence_infinite_withcheck_stateful(); 10 void ldv_error(); 25 int ldv_undef_int(); 14 void * ldv_err_ptr(long error); 28 bool ldv_is_err_or_null(const void *ptr); 9 int ldv_spin_alloc_lock_of_task_struct = 1; 12 void ldv_spin_lock_alloc_lock_of_task_struct(); 21 void ldv_spin_unlock_alloc_lock_of_task_struct(); 30 int ldv_spin_trylock_alloc_lock_of_task_struct(); 56 void ldv_spin_unlock_wait_alloc_lock_of_task_struct(); 63 int ldv_spin_is_locked_alloc_lock_of_task_struct(); 84 int ldv_spin_can_lock_alloc_lock_of_task_struct(); 91 int ldv_spin_is_contended_alloc_lock_of_task_struct(); 112 int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct(); 134 int ldv_spin_lock = 1; 146 void ldv_spin_unlock_lock(); 155 int ldv_spin_trylock_lock(); 181 void ldv_spin_unlock_wait_lock(); 188 int ldv_spin_is_locked_lock(); 209 int ldv_spin_can_lock_lock(); 216 int ldv_spin_is_contended_lock(); 237 int ldv_atomic_dec_and_lock_lock(); 259 int ldv_spin_lock_of_NOT_ARG_SIGN = 1; 271 void ldv_spin_unlock_lock_of_NOT_ARG_SIGN(); 280 int ldv_spin_trylock_lock_of_NOT_ARG_SIGN(); 306 void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN(); 313 int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN(); 334 int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN(); 341 int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN(); 362 int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN(); 384 int ldv_spin_lock_of_bgpio_chip = 1; 405 int ldv_spin_trylock_lock_of_bgpio_chip(); 431 void ldv_spin_unlock_wait_lock_of_bgpio_chip(); 438 int ldv_spin_is_locked_lock_of_bgpio_chip(); 459 int ldv_spin_can_lock_lock_of_bgpio_chip(); 466 int ldv_spin_is_contended_lock_of_bgpio_chip(); 487 int ldv_atomic_dec_and_lock_lock_of_bgpio_chip(); 509 int ldv_spin_node_size_lock_of_pglist_data = 1; 521 void ldv_spin_unlock_node_size_lock_of_pglist_data(); 530 int 
ldv_spin_trylock_node_size_lock_of_pglist_data(); 556 void ldv_spin_unlock_wait_node_size_lock_of_pglist_data(); 563 int ldv_spin_is_locked_node_size_lock_of_pglist_data(); 584 int ldv_spin_can_lock_node_size_lock_of_pglist_data(); 591 int ldv_spin_is_contended_node_size_lock_of_pglist_data(); 612 int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data(); 634 int ldv_spin_siglock_of_sighand_struct = 1; 646 void ldv_spin_unlock_siglock_of_sighand_struct(); 655 int ldv_spin_trylock_siglock_of_sighand_struct(); 681 void ldv_spin_unlock_wait_siglock_of_sighand_struct(); 688 int ldv_spin_is_locked_siglock_of_sighand_struct(); 709 int ldv_spin_can_lock_siglock_of_sighand_struct(); 716 int ldv_spin_is_contended_siglock_of_sighand_struct(); 737 int ldv_atomic_dec_and_lock_siglock_of_sighand_struct(); return ; } { 546 struct irq_data *var_group1; 547 unsigned int var_grgpio_irq_set_type_3_p1; 548 struct irq_domain *var_group2; 549 unsigned int var_grgpio_irq_map_7_p1; 550 unsigned long var_grgpio_irq_map_7_p2; 551 unsigned int var_grgpio_irq_unmap_8_p1; 552 struct platform_device *var_group3; 553 int res_grgpio_probe_9; 554 int var_grgpio_irq_handler_6_p0; 555 void *var_grgpio_irq_handler_6_p1; 556 int ldv_s_grgpio_driver_platform_driver; 557 int tmp; 558 int tmp___0; 739 ldv_s_grgpio_driver_platform_driver = 0; 725 LDV_IN_INTERRUPT = 1; 734 ldv_initialize() { /* Function call is skipped due to function is undefined */} 744 goto ldv_25278; 744 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 747 goto ldv_25277; 745 ldv_25277:; 748 tmp = nondet_int() { /* Function call is skipped due to function is undefined */} 748 switch (tmp) 904 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { } 297 struct grgpio_priv *priv; 298 int index; 299 struct grgpio_lirq *lirq; 300 struct grgpio_uirq *uirq; 301 unsigned long flags; 302 int ngpio; 303 int i; 304 int __ret_warn_on; 305 long tmp; 297 struct grgpio_priv 
*__CPAchecker_TMP_0 = (struct grgpio_priv *)(d->host_data); 297 priv = __CPAchecker_TMP_0; 302 ngpio = (int)(priv->bgc.gc.ngpio); 308 -irq_set_chip_and_handler(irq, (struct irq_chip *)0, (void (*)(unsigned int, struct irq_desc *))0) { 511 irq_set_chip_and_handler_name(irq, chip, handle, (const char *)0) { /* Function call is skipped due to function is undefined */} } 309 irq_set_chip_data(irq, (void *)0) { /* Function call is skipped due to function is undefined */} { 368 __ldv_spin_lock(ldv_func_arg1) { /* Function call is skipped due to function is undefined */} } 314 index = -1; 315 i = 0; 315 goto ldv_25180; 317 goto ldv_25179; 316 ldv_25179:; 316 lirq = ((struct grgpio_lirq *)(&(priv->lirqs))) + ((unsigned long)i); 317 unsigned int __CPAchecker_TMP_1 = (unsigned int)(lirq->irq); { } 105 struct bgpio_chip *bgc; 106 unsigned long mask; 107 unsigned long tmp; 108 unsigned long flags; 106 bgc = &(priv->bgc); 107 tmp = (*(bgc->pin2mask))(bgc, offset); 107 mask = tmp; { }} | Source code 1
2 /*
3 * Driver for Aeroflex Gaisler GRGPIO General Purpose I/O cores.
4 *
5 * 2013 (c) Aeroflex Gaisler AB
6 *
7 * This driver supports the GRGPIO GPIO core available in the GRLIB VHDL
8 * IP core library.
9 *
10 * Full documentation of the GRGPIO core can be found here:
11 * http://www.gaisler.com/products/grlib/grip.pdf
12 *
13 * See "Documentation/devicetree/bindings/gpio/gpio-grgpio.txt" for
14 * information on open firmware properties.
15 *
16 * This program is free software; you can redistribute it and/or modify it
17 * under the terms of the GNU General Public License as published by the
18 * Free Software Foundation; either version 2 of the License, or (at your
19 * option) any later version.
20 *
21 * Contributors: Andreas Larsson <andreas@gaisler.com>
22 */
23
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/init.h>
27 #include <linux/spinlock.h>
28 #include <linux/io.h>
29 #include <linux/of.h>
30 #include <linux/of_gpio.h>
31 #include <linux/of_platform.h>
32 #include <linux/gpio.h>
33 #include <linux/slab.h>
34 #include <linux/err.h>
35 #include <linux/basic_mmio_gpio.h>
36 #include <linux/interrupt.h>
37 #include <linux/irq.h>
38 #include <linux/irqdomain.h>
39
40 #define GRGPIO_MAX_NGPIO 32
41
42 #define GRGPIO_DATA 0x00
43 #define GRGPIO_OUTPUT 0x04
44 #define GRGPIO_DIR 0x08
45 #define GRGPIO_IMASK 0x0c
46 #define GRGPIO_IPOL 0x10
47 #define GRGPIO_IEDGE 0x14
48 #define GRGPIO_BYPASS 0x18
49 #define GRGPIO_IMAP_BASE 0x20
50
51 /* Structure for an irq of the core - called an underlying irq */
/* Structure for an irq of the core - called an underlying irq */
struct grgpio_uirq {
	u8 refcnt; /* Reference counter to manage requesting/freeing of uirq */
	u8 uirq; /* Underlying irq of the gpio driver */
};

/*
 * Structure for an irq of a gpio line handed out by this driver. The index is
 * used to map to the corresponding underlying irq.
 */
struct grgpio_lirq {
	s8 index; /* Index into struct grgpio_priv's uirqs, or -1 for none */
	u8 irq; /* irq for the gpio line */
};

struct grgpio_priv {
	struct bgpio_chip bgc; /* generic mmio gpio chip this driver builds on */
	void __iomem *regs; /* base of the memory-mapped register block */
	struct device *dev;

	u32 imask; /* irq mask shadow register; guarded by bgc.lock */

	/*
	 * The grgpio core can have multiple "underlying" irqs. The gpio lines
	 * can be mapped to any one or none of these underlying irqs
	 * independently of each other. This driver sets up an irq domain and
	 * hands out separate irqs to each gpio line
	 */
	struct irq_domain *domain;

	/*
	 * This array contains information on each underlying irq, each
	 * irq of the grgpio core itself.
	 */
	struct grgpio_uirq uirqs[GRGPIO_MAX_NGPIO];

	/*
	 * This array contains information for each gpio line on the irqs
	 * obtains from this driver. An index value of -1 for a certain gpio
	 * line indicates that the line has no irq. Otherwise the index connects
	 * the irq to the underlying irq by pointing into the uirqs array.
	 */
	struct grgpio_lirq lirqs[GRGPIO_MAX_NGPIO];
};
95
96 static inline struct grgpio_priv *grgpio_gc_to_priv(struct gpio_chip *gc)
97 {
98 struct bgpio_chip *bgc = to_bgpio_chip(gc);
99
100 return container_of(bgc, struct grgpio_priv, bgc);
101 }
102
/*
 * Enable (val != 0) or disable (val == 0) the irq of one gpio line by
 * updating the shadow copy of the IMASK register and writing it back to
 * hardware.
 *
 * Locking: takes bgc->lock internally. Callers must NOT already hold
 * bgc->lock - spinlocks are not recursive, so that would deadlock.
 */
static void grgpio_set_imask(struct grgpio_priv *priv, unsigned int offset,
			     int val)
{
	struct bgpio_chip *bgc = &priv->bgc;
	unsigned long mask = bgc->pin2mask(bgc, offset);
	unsigned long flags;

	spin_lock_irqsave(&bgc->lock, flags);

	if (val)
		priv->imask |= mask;	/* unmask: enable this line's irq */
	else
		priv->imask &= ~mask;	/* mask: disable this line's irq */
	bgc->write_reg(priv->regs + GRGPIO_IMASK, priv->imask);

	spin_unlock_irqrestore(&bgc->lock, flags);
}
120
121 static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
122 {
123 struct grgpio_priv *priv = grgpio_gc_to_priv(gc);
124
125 if (offset >= gc->ngpio)
126 return -ENXIO;
127
128 if (priv->lirqs[offset].index < 0)
129 return -ENXIO;
130
131 return irq_create_mapping(priv->domain, offset);
132 }
133
134 /* -------------------- IRQ chip functions -------------------- */
135
136 static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)
137 {
138 struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
139 unsigned long flags;
140 u32 mask = BIT(d->hwirq);
141 u32 ipol;
142 u32 iedge;
143 u32 pol;
144 u32 edge;
145
146 switch (type) {
147 case IRQ_TYPE_LEVEL_LOW:
148 pol = 0;
149 edge = 0;
150 break;
151 case IRQ_TYPE_LEVEL_HIGH:
152 pol = mask;
153 edge = 0;
154 break;
155 case IRQ_TYPE_EDGE_FALLING:
156 pol = 0;
157 edge = mask;
158 break;
159 case IRQ_TYPE_EDGE_RISING:
160 pol = mask;
161 edge = mask;
162 break;
163 default:
164 return -EINVAL;
165 }
166
167 spin_lock_irqsave(&priv->bgc.lock, flags);
168
169 ipol = priv->bgc.read_reg(priv->regs + GRGPIO_IPOL) & ~mask;
170 iedge = priv->bgc.read_reg(priv->regs + GRGPIO_IEDGE) & ~mask;
171
172 priv->bgc.write_reg(priv->regs + GRGPIO_IPOL, ipol | pol);
173 priv->bgc.write_reg(priv->regs + GRGPIO_IEDGE, iedge | edge);
174
175 spin_unlock_irqrestore(&priv->bgc.lock, flags);
176
177 return 0;
178 }
179
180 static void grgpio_irq_mask(struct irq_data *d)
181 {
182 struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
183 int offset = d->hwirq;
184
185 grgpio_set_imask(priv, offset, 0);
186 }
187
188 static void grgpio_irq_unmask(struct irq_data *d)
189 {
190 struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
191 int offset = d->hwirq;
192
193 grgpio_set_imask(priv, offset, 1);
194 }
195
/* irq_chip shared by every per-gpio-line virq handed out by this driver */
static struct irq_chip grgpio_irq_chip = {
	.name			= "grgpio",
	.irq_mask		= grgpio_irq_mask,
	.irq_unmask		= grgpio_irq_unmask,
	.irq_set_type		= grgpio_irq_set_type,
};
202
203 static irqreturn_t grgpio_irq_handler(int irq, void *dev)
204 {
205 struct grgpio_priv *priv = dev;
206 int ngpio = priv->bgc.gc.ngpio;
207 unsigned long flags;
208 int i;
209 int match = 0;
210
211 spin_lock_irqsave(&priv->bgc.lock, flags);
212
213 /*
214 * For each gpio line, call its interrupt handler if it its underlying
215 * irq matches the current irq that is handled.
216 */
217 for (i = 0; i < ngpio; i++) {
218 struct grgpio_lirq *lirq = &priv->lirqs[i];
219
220 if (priv->imask & BIT(i) && lirq->index >= 0 &&
221 priv->uirqs[lirq->index].uirq == irq) {
222 generic_handle_irq(lirq->irq);
223 match = 1;
224 }
225 }
226
227 spin_unlock_irqrestore(&priv->bgc.lock, flags);
228
229 if (!match)
230 dev_warn(priv->dev, "No gpio line matched irq %d\n", irq);
231
232 return IRQ_HANDLED;
233 }
234
235 /*
236 * This function will be called as a consequence of the call to
237 * irq_create_mapping in grgpio_to_irq
238 */
/*
 * irq_domain .map callback, invoked via irq_create_mapping() from
 * grgpio_to_irq(). Binds virq 'irq' to gpio line 'hwirq', requesting the
 * shared underlying core irq on first use (reference counted via
 * uirq->refcnt).
 *
 * Returns 0 on success, -EINVAL for an unmappable line, or the error
 * from request_irq().
 */
static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
			  irq_hw_number_t hwirq)
{
	struct grgpio_priv *priv = d->host_data;
	struct grgpio_lirq *lirq;
	struct grgpio_uirq *uirq;
	unsigned long flags;
	int offset = hwirq;	/* hwirq is the gpio line offset */
	int ret = 0;

	if (!priv)
		return -EINVAL;

	/* Lines with index < 0 have no underlying irq and cannot be mapped */
	lirq = &priv->lirqs[offset];
	if (lirq->index < 0)
		return -EINVAL;

	dev_dbg(priv->dev, "Mapping irq %d for gpio line %d\n",
		irq, offset);

	spin_lock_irqsave(&priv->bgc.lock, flags);

	/* Request underlying irq if not already requested */
	lirq->irq = irq;
	uirq = &priv->uirqs[lirq->index];
	if (uirq->refcnt == 0) {
		/*
		 * NOTE(review): request_irq() is called here with bgc.lock
		 * held and interrupts disabled; confirm this is acceptable
		 * for the targeted configurations.
		 */
		ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
				  dev_name(priv->dev), priv);
		if (ret) {
			dev_err(priv->dev,
				"Could not request underlying irq %d\n",
				uirq->uirq);

			spin_unlock_irqrestore(&priv->bgc.lock, flags);

			return ret;
		}
	}
	uirq->refcnt++;	/* another gpio line now shares this underlying irq */

	spin_unlock_irqrestore(&priv->bgc.lock, flags);

	/* Setup irq */
	irq_set_chip_data(irq, priv);
	irq_set_chip_and_handler(irq, &grgpio_irq_chip,
				 handle_simple_irq);
	irq_clear_status_flags(irq, IRQ_NOREQUEST);
#ifdef CONFIG_ARM
	set_irq_flags(irq, IRQF_VALID);
#else
	irq_set_noprobe(irq);
#endif

	return ret;
}
294
295 static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
296 {
297 struct grgpio_priv *priv = d->host_data;
298 int index;
299 struct grgpio_lirq *lirq;
300 struct grgpio_uirq *uirq;
301 unsigned long flags;
302 int ngpio = priv->bgc.gc.ngpio;
303 int i;
304
305 #ifdef CONFIG_ARM
306 set_irq_flags(irq, 0);
307 #endif
308 irq_set_chip_and_handler(irq, NULL, NULL);
309 irq_set_chip_data(irq, NULL);
310
311 spin_lock_irqsave(&priv->bgc.lock, flags);
312
313 /* Free underlying irq if last user unmapped */
314 index = -1;
315 for (i = 0; i < ngpio; i++) {
316 lirq = &priv->lirqs[i];
317 if (lirq->irq == irq) {
318 grgpio_set_imask(priv, i, 0);
319 lirq->irq = 0;
320 index = lirq->index;
321 break;
322 }
323 }
324 WARN_ON(index < 0);
325
326 if (index >= 0) {
327 uirq = &priv->uirqs[lirq->index];
328 uirq->refcnt--;
329 if (uirq->refcnt == 0)
330 free_irq(uirq->uirq, priv);
331 }
332
333 spin_unlock_irqrestore(&priv->bgc.lock, flags);
334 }
335
/* irq domain ops: one virq per gpio line, mapped lazily via .to_irq */
static const struct irq_domain_ops grgpio_irq_domain_ops = {
	.map = grgpio_irq_map,
	.unmap = grgpio_irq_unmap,
};
340
341 /* ------------------------------------------------------------ */
342
/*
 * Probe: map the register block, initialize the generic mmio gpio chip,
 * read optional "nbits" (number of lines) and "irqmap" (per-line index
 * into the core's underlying irqs) device-tree properties, set up the irq
 * domain when an irqmap is present, and register the gpio chip.
 *
 * Returns 0 on success or a negative errno. All allocations are devm-
 * managed; only the irq domain needs explicit cleanup on late failure.
 */
static int grgpio_probe(struct platform_device *ofdev)
{
	struct device_node *np = ofdev->dev.of_node;
	void __iomem *regs;
	struct gpio_chip *gc;
	struct bgpio_chip *bgc;
	struct grgpio_priv *priv;
	struct resource *res;
	int err;
	u32 prop;
	s32 *irqmap;
	int size;
	int i;

	priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&ofdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* 4-byte big-endian registers; DIR register, no separate set/clr */
	bgc = &priv->bgc;
	err = bgpio_init(bgc, &ofdev->dev, 4, regs + GRGPIO_DATA,
			 regs + GRGPIO_OUTPUT, NULL, regs + GRGPIO_DIR, NULL,
			 BGPIOF_BIG_ENDIAN_BYTE_ORDER);
	if (err) {
		dev_err(&ofdev->dev, "bgpio_init() failed\n");
		return err;
	}

	priv->regs = regs;
	/* Seed the imask shadow from whatever the hardware currently holds */
	priv->imask = bgc->read_reg(regs + GRGPIO_IMASK);
	priv->dev = &ofdev->dev;

	gc = &bgc->gc;
	gc->of_node = np;
	gc->owner = THIS_MODULE;
	gc->to_irq = grgpio_to_irq;
	gc->label = np->full_name;
	gc->base = -1;	/* let gpiolib pick the base number */

	err = of_property_read_u32(np, "nbits", &prop);
	/* NOTE(review): prop is u32, so "prop <= 0" only catches prop == 0 */
	if (err || prop <= 0 || prop > GRGPIO_MAX_NGPIO) {
		gc->ngpio = GRGPIO_MAX_NGPIO;
		dev_dbg(&ofdev->dev,
			"No or invalid nbits property: assume %d\n", gc->ngpio);
	} else {
		gc->ngpio = prop;
	}

	/*
	 * The irqmap contains the index values indicating which underlying irq,
	 * if anyone, is connected to that line
	 */
	irqmap = (s32 *)of_get_property(np, "irqmap", &size);
	if (irqmap) {
		if (size < gc->ngpio) {
			dev_err(&ofdev->dev,
				"irqmap shorter than ngpio (%d < %d)\n",
				size, gc->ngpio);
			return -EINVAL;
		}

		priv->domain = irq_domain_add_linear(np, gc->ngpio,
						     &grgpio_irq_domain_ops,
						     priv);
		if (!priv->domain) {
			dev_err(&ofdev->dev, "Could not add irq domain\n");
			return -EINVAL;
		}

		for (i = 0; i < gc->ngpio; i++) {
			struct grgpio_lirq *lirq;
			int ret;

			lirq = &priv->lirqs[i];
			lirq->index = irqmap[i];

			/* index < 0 marks a line without an underlying irq */
			if (lirq->index < 0)
				continue;

			ret = platform_get_irq(ofdev, lirq->index);
			if (ret <= 0) {
				/*
				 * Continue without irq functionality for that
				 * gpio line
				 */
				dev_err(priv->dev,
					"Failed to get irq for offset %d\n", i);
				continue;
			}
			priv->uirqs[lirq->index].uirq = ret;
		}
	}

	platform_set_drvdata(ofdev, priv);

	err = gpiochip_add(gc);
	if (err) {
		dev_err(&ofdev->dev, "Could not add gpiochip\n");
		/* Undo the only non-devm resource acquired above */
		if (priv->domain)
			irq_domain_remove(priv->domain);
		return err;
	}

	dev_info(&ofdev->dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
		 priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off");

	return 0;
}
455
456 static int grgpio_remove(struct platform_device *ofdev)
457 {
458 struct grgpio_priv *priv = platform_get_drvdata(ofdev);
459 unsigned long flags;
460 int i;
461 int ret = 0;
462
463 spin_lock_irqsave(&priv->bgc.lock, flags);
464
465 if (priv->domain) {
466 for (i = 0; i < GRGPIO_MAX_NGPIO; i++) {
467 if (priv->uirqs[i].refcnt != 0) {
468 ret = -EBUSY;
469 goto out;
470 }
471 }
472 }
473
474 gpiochip_remove(&priv->bgc.gc);
475
476 if (priv->domain)
477 irq_domain_remove(priv->domain);
478
479 out:
480 spin_unlock_irqrestore(&priv->bgc.lock, flags);
481
482 return ret;
483 }
484
/* Device-tree match table: matched by node name, not "compatible" string */
static const struct of_device_id grgpio_match[] = {
	{.name = "GAISLER_GPIO"},
	{.name = "01_01a"},
	{},
};

MODULE_DEVICE_TABLE(of, grgpio_match);
492
static struct platform_driver grgpio_driver = {
	.driver = {
		.name = "grgpio",
		.of_match_table = grgpio_match,
	},
	.probe = grgpio_probe,
	.remove = grgpio_remove,
};
/* Generates module init/exit that register/unregister the platform driver */
module_platform_driver(grgpio_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Driver for Aeroflex Gaisler GRGPIO");
MODULE_LICENSE("GPL");
506
507
508
509
510
511 /* LDV_COMMENT_BEGIN_MAIN */
512 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
513
514 /*###########################################################################*/
515
516 /*############## Driver Environment Generator 0.2 output ####################*/
517
518 /*###########################################################################*/
519
520
521
522 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
523 void ldv_check_final_state(void);
524
525 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
526 void ldv_check_return_value(int res);
527
528 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
529 void ldv_check_return_value_probe(int res);
530
531 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
532 void ldv_initialize(void);
533
534 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
535 void ldv_handler_precall(void);
536
537 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
538 int nondet_int(void);
539
540 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
541 int LDV_IN_INTERRUPT;
542
543 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
544 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
545
546
547
548 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
549 /*============================= VARIABLE DECLARATION PART =============================*/
550 /** STRUCT: struct type: irq_chip, struct name: grgpio_irq_chip **/
551 /* content: static void grgpio_irq_mask(struct irq_data *d)*/
552 /* LDV_COMMENT_BEGIN_PREP */
553 #define GRGPIO_MAX_NGPIO 32
554 #define GRGPIO_DATA 0x00
555 #define GRGPIO_OUTPUT 0x04
556 #define GRGPIO_DIR 0x08
557 #define GRGPIO_IMASK 0x0c
558 #define GRGPIO_IPOL 0x10
559 #define GRGPIO_IEDGE 0x14
560 #define GRGPIO_BYPASS 0x18
561 #define GRGPIO_IMAP_BASE 0x20
562 /* LDV_COMMENT_END_PREP */
563 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_mask" */
564 struct irq_data * var_group1;
565 /* LDV_COMMENT_BEGIN_PREP */
566 #ifdef CONFIG_ARM
567 #else
568 #endif
569 #ifdef CONFIG_ARM
570 #endif
571 /* LDV_COMMENT_END_PREP */
572 /* content: static void grgpio_irq_unmask(struct irq_data *d)*/
573 /* LDV_COMMENT_BEGIN_PREP */
574 #define GRGPIO_MAX_NGPIO 32
575 #define GRGPIO_DATA 0x00
576 #define GRGPIO_OUTPUT 0x04
577 #define GRGPIO_DIR 0x08
578 #define GRGPIO_IMASK 0x0c
579 #define GRGPIO_IPOL 0x10
580 #define GRGPIO_IEDGE 0x14
581 #define GRGPIO_BYPASS 0x18
582 #define GRGPIO_IMAP_BASE 0x20
583 /* LDV_COMMENT_END_PREP */
584 /* LDV_COMMENT_BEGIN_PREP */
585 #ifdef CONFIG_ARM
586 #else
587 #endif
588 #ifdef CONFIG_ARM
589 #endif
590 /* LDV_COMMENT_END_PREP */
591 /* content: static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)*/
592 /* LDV_COMMENT_BEGIN_PREP */
593 #define GRGPIO_MAX_NGPIO 32
594 #define GRGPIO_DATA 0x00
595 #define GRGPIO_OUTPUT 0x04
596 #define GRGPIO_DIR 0x08
597 #define GRGPIO_IMASK 0x0c
598 #define GRGPIO_IPOL 0x10
599 #define GRGPIO_IEDGE 0x14
600 #define GRGPIO_BYPASS 0x18
601 #define GRGPIO_IMAP_BASE 0x20
602 /* LDV_COMMENT_END_PREP */
603 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_set_type" */
604 unsigned int var_grgpio_irq_set_type_3_p1;
605 /* LDV_COMMENT_BEGIN_PREP */
606 #ifdef CONFIG_ARM
607 #else
608 #endif
609 #ifdef CONFIG_ARM
610 #endif
611 /* LDV_COMMENT_END_PREP */
612
613 /** STRUCT: struct type: irq_domain_ops, struct name: grgpio_irq_domain_ops **/
614 /* content: static int grgpio_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq)*/
615 /* LDV_COMMENT_BEGIN_PREP */
616 #define GRGPIO_MAX_NGPIO 32
617 #define GRGPIO_DATA 0x00
618 #define GRGPIO_OUTPUT 0x04
619 #define GRGPIO_DIR 0x08
620 #define GRGPIO_IMASK 0x0c
621 #define GRGPIO_IPOL 0x10
622 #define GRGPIO_IEDGE 0x14
623 #define GRGPIO_BYPASS 0x18
624 #define GRGPIO_IMAP_BASE 0x20
625 /* LDV_COMMENT_END_PREP */
626 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_map" */
627 struct irq_domain * var_group2;
628 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_map" */
629 unsigned int var_grgpio_irq_map_7_p1;
630 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_map" */
631 irq_hw_number_t var_grgpio_irq_map_7_p2;
632 /* LDV_COMMENT_BEGIN_PREP */
633 #ifdef CONFIG_ARM
634 #endif
635 /* LDV_COMMENT_END_PREP */
636 /* content: static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)*/
637 /* LDV_COMMENT_BEGIN_PREP */
638 #define GRGPIO_MAX_NGPIO 32
639 #define GRGPIO_DATA 0x00
640 #define GRGPIO_OUTPUT 0x04
641 #define GRGPIO_DIR 0x08
642 #define GRGPIO_IMASK 0x0c
643 #define GRGPIO_IPOL 0x10
644 #define GRGPIO_IEDGE 0x14
645 #define GRGPIO_BYPASS 0x18
646 #define GRGPIO_IMAP_BASE 0x20
647 #ifdef CONFIG_ARM
648 #else
649 #endif
650 /* LDV_COMMENT_END_PREP */
651 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_unmap" */
652 unsigned int var_grgpio_irq_unmap_8_p1;
653
654 /** STRUCT: struct type: platform_driver, struct name: grgpio_driver **/
655 /* content: static int grgpio_probe(struct platform_device *ofdev)*/
656 /* LDV_COMMENT_BEGIN_PREP */
657 #define GRGPIO_MAX_NGPIO 32
658 #define GRGPIO_DATA 0x00
659 #define GRGPIO_OUTPUT 0x04
660 #define GRGPIO_DIR 0x08
661 #define GRGPIO_IMASK 0x0c
662 #define GRGPIO_IPOL 0x10
663 #define GRGPIO_IEDGE 0x14
664 #define GRGPIO_BYPASS 0x18
665 #define GRGPIO_IMAP_BASE 0x20
666 #ifdef CONFIG_ARM
667 #else
668 #endif
669 #ifdef CONFIG_ARM
670 #endif
671 /* LDV_COMMENT_END_PREP */
672 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_probe" */
673 struct platform_device * var_group3;
674 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "grgpio_probe" */
675 static int res_grgpio_probe_9;
676 /* content: static int grgpio_remove(struct platform_device *ofdev)*/
677 /* LDV_COMMENT_BEGIN_PREP */
678 #define GRGPIO_MAX_NGPIO 32
679 #define GRGPIO_DATA 0x00
680 #define GRGPIO_OUTPUT 0x04
681 #define GRGPIO_DIR 0x08
682 #define GRGPIO_IMASK 0x0c
683 #define GRGPIO_IPOL 0x10
684 #define GRGPIO_IEDGE 0x14
685 #define GRGPIO_BYPASS 0x18
686 #define GRGPIO_IMAP_BASE 0x20
687 #ifdef CONFIG_ARM
688 #else
689 #endif
690 #ifdef CONFIG_ARM
691 #endif
692 /* LDV_COMMENT_END_PREP */
693
694 /** CALLBACK SECTION request_irq **/
695 /* content: static irqreturn_t grgpio_irq_handler(int irq, void *dev)*/
696 /* LDV_COMMENT_BEGIN_PREP */
697 #define GRGPIO_MAX_NGPIO 32
698 #define GRGPIO_DATA 0x00
699 #define GRGPIO_OUTPUT 0x04
700 #define GRGPIO_DIR 0x08
701 #define GRGPIO_IMASK 0x0c
702 #define GRGPIO_IPOL 0x10
703 #define GRGPIO_IEDGE 0x14
704 #define GRGPIO_BYPASS 0x18
705 #define GRGPIO_IMAP_BASE 0x20
706 /* LDV_COMMENT_END_PREP */
707 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_handler" */
708 int var_grgpio_irq_handler_6_p0;
709 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_handler" */
710 void * var_grgpio_irq_handler_6_p1;
711 /* LDV_COMMENT_BEGIN_PREP */
712 #ifdef CONFIG_ARM
713 #else
714 #endif
715 #ifdef CONFIG_ARM
716 #endif
717 /* LDV_COMMENT_END_PREP */
718
719
720
721
722 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
723 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
724 /*============================= VARIABLE INITIALIZING PART =============================*/
725 LDV_IN_INTERRUPT=1;
726
727
728
729
730 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
731 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
732 /*============================= FUNCTION CALL SECTION =============================*/
733 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
734 ldv_initialize();
735
736
737
738
739 int ldv_s_grgpio_driver_platform_driver = 0;
740
741
742
743
744 while( nondet_int()
745 || !(ldv_s_grgpio_driver_platform_driver == 0)
746 ) {
747
748 switch(nondet_int()) {
749
750 case 0: {
751
752 /** STRUCT: struct type: irq_chip, struct name: grgpio_irq_chip **/
753
754
755 /* content: static void grgpio_irq_mask(struct irq_data *d)*/
756 /* LDV_COMMENT_BEGIN_PREP */
757 #define GRGPIO_MAX_NGPIO 32
758 #define GRGPIO_DATA 0x00
759 #define GRGPIO_OUTPUT 0x04
760 #define GRGPIO_DIR 0x08
761 #define GRGPIO_IMASK 0x0c
762 #define GRGPIO_IPOL 0x10
763 #define GRGPIO_IEDGE 0x14
764 #define GRGPIO_BYPASS 0x18
765 #define GRGPIO_IMAP_BASE 0x20
766 /* LDV_COMMENT_END_PREP */
767 /* LDV_COMMENT_FUNCTION_CALL Function from field "irq_mask" from driver structure with callbacks "grgpio_irq_chip" */
768 ldv_handler_precall();
769 grgpio_irq_mask( var_group1);
770 /* LDV_COMMENT_BEGIN_PREP */
771 #ifdef CONFIG_ARM
772 #else
773 #endif
774 #ifdef CONFIG_ARM
775 #endif
776 /* LDV_COMMENT_END_PREP */
777
778
779
780
781 }
782
783 break;
784 case 1: {
785
786 /** STRUCT: struct type: irq_chip, struct name: grgpio_irq_chip **/
787
788
789 /* content: static void grgpio_irq_unmask(struct irq_data *d)*/
790 /* LDV_COMMENT_BEGIN_PREP */
791 #define GRGPIO_MAX_NGPIO 32
792 #define GRGPIO_DATA 0x00
793 #define GRGPIO_OUTPUT 0x04
794 #define GRGPIO_DIR 0x08
795 #define GRGPIO_IMASK 0x0c
796 #define GRGPIO_IPOL 0x10
797 #define GRGPIO_IEDGE 0x14
798 #define GRGPIO_BYPASS 0x18
799 #define GRGPIO_IMAP_BASE 0x20
800 /* LDV_COMMENT_END_PREP */
801 /* LDV_COMMENT_FUNCTION_CALL Function from field "irq_unmask" from driver structure with callbacks "grgpio_irq_chip" */
802 ldv_handler_precall();
803 grgpio_irq_unmask( var_group1);
804 /* LDV_COMMENT_BEGIN_PREP */
805 #ifdef CONFIG_ARM
806 #else
807 #endif
808 #ifdef CONFIG_ARM
809 #endif
810 /* LDV_COMMENT_END_PREP */
811
812
813
814
815 }
816
817 break;
818 case 2: {
819
820 /** STRUCT: struct type: irq_chip, struct name: grgpio_irq_chip **/
821
822
823 /* content: static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)*/
824 /* LDV_COMMENT_BEGIN_PREP */
825 #define GRGPIO_MAX_NGPIO 32
826 #define GRGPIO_DATA 0x00
827 #define GRGPIO_OUTPUT 0x04
828 #define GRGPIO_DIR 0x08
829 #define GRGPIO_IMASK 0x0c
830 #define GRGPIO_IPOL 0x10
831 #define GRGPIO_IEDGE 0x14
832 #define GRGPIO_BYPASS 0x18
833 #define GRGPIO_IMAP_BASE 0x20
834 /* LDV_COMMENT_END_PREP */
835 /* LDV_COMMENT_FUNCTION_CALL Function from field "irq_set_type" from driver structure with callbacks "grgpio_irq_chip" */
836 ldv_handler_precall();
837 grgpio_irq_set_type( var_group1, var_grgpio_irq_set_type_3_p1);
838 /* LDV_COMMENT_BEGIN_PREP */
839 #ifdef CONFIG_ARM
840 #else
841 #endif
842 #ifdef CONFIG_ARM
843 #endif
844 /* LDV_COMMENT_END_PREP */
845
846
847
848
849 }
850
851 break;
852 case 3: {
853
854 /** STRUCT: struct type: irq_domain_ops, struct name: grgpio_irq_domain_ops **/
855
856
857 /* content: static int grgpio_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq)*/
858 /* LDV_COMMENT_BEGIN_PREP */
859 #define GRGPIO_MAX_NGPIO 32
860 #define GRGPIO_DATA 0x00
861 #define GRGPIO_OUTPUT 0x04
862 #define GRGPIO_DIR 0x08
863 #define GRGPIO_IMASK 0x0c
864 #define GRGPIO_IPOL 0x10
865 #define GRGPIO_IEDGE 0x14
866 #define GRGPIO_BYPASS 0x18
867 #define GRGPIO_IMAP_BASE 0x20
868 /* LDV_COMMENT_END_PREP */
869 /* LDV_COMMENT_FUNCTION_CALL Function from field "map" from driver structure with callbacks "grgpio_irq_domain_ops" */
870 ldv_handler_precall();
871 grgpio_irq_map( var_group2, var_grgpio_irq_map_7_p1, var_grgpio_irq_map_7_p2);
872 /* LDV_COMMENT_BEGIN_PREP */
873 #ifdef CONFIG_ARM
874 #endif
875 /* LDV_COMMENT_END_PREP */
876
877
878
879
880 }
881
882 break;
883 case 4: {
884
885 /** STRUCT: struct type: irq_domain_ops, struct name: grgpio_irq_domain_ops **/
886
887
888 /* content: static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)*/
889 /* LDV_COMMENT_BEGIN_PREP */
890 #define GRGPIO_MAX_NGPIO 32
891 #define GRGPIO_DATA 0x00
892 #define GRGPIO_OUTPUT 0x04
893 #define GRGPIO_DIR 0x08
894 #define GRGPIO_IMASK 0x0c
895 #define GRGPIO_IPOL 0x10
896 #define GRGPIO_IEDGE 0x14
897 #define GRGPIO_BYPASS 0x18
898 #define GRGPIO_IMAP_BASE 0x20
899 #ifdef CONFIG_ARM
900 #else
901 #endif
902 /* LDV_COMMENT_END_PREP */
903 /* LDV_COMMENT_FUNCTION_CALL Function from field "unmap" from driver structure with callbacks "grgpio_irq_domain_ops" */
904 ldv_handler_precall();
905 grgpio_irq_unmap( var_group2, var_grgpio_irq_unmap_8_p1);
906
907
908
909
910 }
911
912 break;
913 case 5: {
914
915 /** STRUCT: struct type: platform_driver, struct name: grgpio_driver **/
916 if(ldv_s_grgpio_driver_platform_driver==0) {
917
918 /* content: static int grgpio_probe(struct platform_device *ofdev)*/
919 /* LDV_COMMENT_BEGIN_PREP */
920 #define GRGPIO_MAX_NGPIO 32
921 #define GRGPIO_DATA 0x00
922 #define GRGPIO_OUTPUT 0x04
923 #define GRGPIO_DIR 0x08
924 #define GRGPIO_IMASK 0x0c
925 #define GRGPIO_IPOL 0x10
926 #define GRGPIO_IEDGE 0x14
927 #define GRGPIO_BYPASS 0x18
928 #define GRGPIO_IMAP_BASE 0x20
929 #ifdef CONFIG_ARM
930 #else
931 #endif
932 #ifdef CONFIG_ARM
933 #endif
934 /* LDV_COMMENT_END_PREP */
935 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "grgpio_driver". Standart function test for correct return result. */
936 res_grgpio_probe_9 = grgpio_probe( var_group3);
937 ldv_check_return_value(res_grgpio_probe_9);
938 ldv_check_return_value_probe(res_grgpio_probe_9);
939 if(res_grgpio_probe_9)
940 goto ldv_module_exit;
941 ldv_s_grgpio_driver_platform_driver++;
942
943 }
944
945 }
946
947 break;
948 case 6: {
949
950 /** STRUCT: struct type: platform_driver, struct name: grgpio_driver **/
951 if(ldv_s_grgpio_driver_platform_driver==1) {
952
953 /* content: static int grgpio_remove(struct platform_device *ofdev)*/
954 /* LDV_COMMENT_BEGIN_PREP */
955 #define GRGPIO_MAX_NGPIO 32
956 #define GRGPIO_DATA 0x00
957 #define GRGPIO_OUTPUT 0x04
958 #define GRGPIO_DIR 0x08
959 #define GRGPIO_IMASK 0x0c
960 #define GRGPIO_IPOL 0x10
961 #define GRGPIO_IEDGE 0x14
962 #define GRGPIO_BYPASS 0x18
963 #define GRGPIO_IMAP_BASE 0x20
964 #ifdef CONFIG_ARM
965 #else
966 #endif
967 #ifdef CONFIG_ARM
968 #endif
969 /* LDV_COMMENT_END_PREP */
970 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "grgpio_driver" */
971 ldv_handler_precall();
972 grgpio_remove( var_group3);
973 ldv_s_grgpio_driver_platform_driver=0;
974
975 }
976
977 }
978
979 break;
980 case 7: {
981
982 /** CALLBACK SECTION request_irq **/
983 LDV_IN_INTERRUPT=2;
984
985 /* content: static irqreturn_t grgpio_irq_handler(int irq, void *dev)*/
986 /* LDV_COMMENT_BEGIN_PREP */
987 #define GRGPIO_MAX_NGPIO 32
988 #define GRGPIO_DATA 0x00
989 #define GRGPIO_OUTPUT 0x04
990 #define GRGPIO_DIR 0x08
991 #define GRGPIO_IMASK 0x0c
992 #define GRGPIO_IPOL 0x10
993 #define GRGPIO_IEDGE 0x14
994 #define GRGPIO_BYPASS 0x18
995 #define GRGPIO_IMAP_BASE 0x20
996 /* LDV_COMMENT_END_PREP */
997 /* LDV_COMMENT_FUNCTION_CALL */
998 ldv_handler_precall();
999 grgpio_irq_handler( var_grgpio_irq_handler_6_p0, var_grgpio_irq_handler_6_p1);
1000 /* LDV_COMMENT_BEGIN_PREP */
1001 #ifdef CONFIG_ARM
1002 #else
1003 #endif
1004 #ifdef CONFIG_ARM
1005 #endif
1006 /* LDV_COMMENT_END_PREP */
1007 LDV_IN_INTERRUPT=1;
1008
1009
1010
1011 }
1012
1013 break;
1014 default: break;
1015
1016 }
1017
1018 }
1019
1020 ldv_module_exit:
1021
1022 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
1023 ldv_final: ldv_check_final_state();
1024
1025 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
1026 return;
1027
1028 }
1029 #endif
1030
1031 /* LDV_COMMENT_END_MAIN */ 1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8 #include <linux/kernel.h>
9 #include <linux/spinlock.h>
10
11 extern void __ldv_spin_lock(spinlock_t *lock);
12 extern void __ldv_spin_unlock(spinlock_t *lock);
13 extern int __ldv_spin_trylock(spinlock_t *lock);
14 extern void __ldv_spin_unlock_wait(spinlock_t *lock);
15 extern void __ldv_spin_can_lock(spinlock_t *lock);
16 extern int __ldv_atomic_dec_and_lock(spinlock_t *lock);
17
18 extern void ldv_spin_lock_alloc_lock_of_task_struct(void);
19 extern void ldv_spin_unlock_alloc_lock_of_task_struct(void);
20 extern int ldv_spin_trylock_alloc_lock_of_task_struct(void);
21 extern void ldv_spin_unlock_wait_alloc_lock_of_task_struct(void);
22 extern int ldv_spin_is_locked_alloc_lock_of_task_struct(void);
23 extern int ldv_spin_can_lock_alloc_lock_of_task_struct(void);
24 extern int ldv_spin_is_contended_alloc_lock_of_task_struct(void);
25 extern int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct(void);
26 extern void ldv_spin_lock_lock(void);
27 extern void ldv_spin_unlock_lock(void);
28 extern int ldv_spin_trylock_lock(void);
29 extern void ldv_spin_unlock_wait_lock(void);
30 extern int ldv_spin_is_locked_lock(void);
31 extern int ldv_spin_can_lock_lock(void);
32 extern int ldv_spin_is_contended_lock(void);
33 extern int ldv_atomic_dec_and_lock_lock(void);
34 extern void ldv_spin_lock_lock_of_NOT_ARG_SIGN(void);
35 extern void ldv_spin_unlock_lock_of_NOT_ARG_SIGN(void);
36 extern int ldv_spin_trylock_lock_of_NOT_ARG_SIGN(void);
37 extern void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN(void);
38 extern int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN(void);
39 extern int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN(void);
40 extern int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN(void);
41 extern int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN(void);
42 extern void ldv_spin_lock_lock_of_bgpio_chip(void);
43 extern void ldv_spin_unlock_lock_of_bgpio_chip(void);
44 extern int ldv_spin_trylock_lock_of_bgpio_chip(void);
45 extern void ldv_spin_unlock_wait_lock_of_bgpio_chip(void);
46 extern int ldv_spin_is_locked_lock_of_bgpio_chip(void);
47 extern int ldv_spin_can_lock_lock_of_bgpio_chip(void);
48 extern int ldv_spin_is_contended_lock_of_bgpio_chip(void);
49 extern int ldv_atomic_dec_and_lock_lock_of_bgpio_chip(void);
50 extern void ldv_spin_lock_node_size_lock_of_pglist_data(void);
51 extern void ldv_spin_unlock_node_size_lock_of_pglist_data(void);
52 extern int ldv_spin_trylock_node_size_lock_of_pglist_data(void);
53 extern void ldv_spin_unlock_wait_node_size_lock_of_pglist_data(void);
54 extern int ldv_spin_is_locked_node_size_lock_of_pglist_data(void);
55 extern int ldv_spin_can_lock_node_size_lock_of_pglist_data(void);
56 extern int ldv_spin_is_contended_node_size_lock_of_pglist_data(void);
57 extern int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data(void);
58 extern void ldv_spin_lock_siglock_of_sighand_struct(void);
59 extern void ldv_spin_unlock_siglock_of_sighand_struct(void);
60 extern int ldv_spin_trylock_siglock_of_sighand_struct(void);
61 extern void ldv_spin_unlock_wait_siglock_of_sighand_struct(void);
62 extern int ldv_spin_is_locked_siglock_of_sighand_struct(void);
63 extern int ldv_spin_can_lock_siglock_of_sighand_struct(void);
64 extern int ldv_spin_is_contended_siglock_of_sighand_struct(void);
65 extern int ldv_atomic_dec_and_lock_siglock_of_sighand_struct(void);
66 #line 1 "/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.2-rc1.tar.xz--X--39_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/1296/dscv_tempdir/dscv/ri/39_7a/drivers/gpio/gpio-grgpio.c"
67
68 /*
69 * Driver for Aeroflex Gaisler GRGPIO General Purpose I/O cores.
70 *
71 * 2013 (c) Aeroflex Gaisler AB
72 *
73 * This driver supports the GRGPIO GPIO core available in the GRLIB VHDL
74 * IP core library.
75 *
76 * Full documentation of the GRGPIO core can be found here:
77 * http://www.gaisler.com/products/grlib/grip.pdf
78 *
79 * See "Documentation/devicetree/bindings/gpio/gpio-grgpio.txt" for
80 * information on open firmware properties.
81 *
82 * This program is free software; you can redistribute it and/or modify it
83 * under the terms of the GNU General Public License as published by the
84 * Free Software Foundation; either version 2 of the License, or (at your
85 * option) any later version.
86 *
87 * Contributors: Andreas Larsson <andreas@gaisler.com>
88 */
89
90 #include <linux/kernel.h>
91 #include <linux/module.h>
92 #include <linux/init.h>
93 #include <linux/spinlock.h>
94 #include <linux/io.h>
95 #include <linux/of.h>
96 #include <linux/of_gpio.h>
97 #include <linux/of_platform.h>
98 #include <linux/gpio.h>
99 #include <linux/slab.h>
100 #include <linux/err.h>
101 #include <linux/basic_mmio_gpio.h>
102 #include <linux/interrupt.h>
103 #include <linux/irq.h>
104 #include <linux/irqdomain.h>
105
106 #define GRGPIO_MAX_NGPIO 32
107
108 #define GRGPIO_DATA 0x00
109 #define GRGPIO_OUTPUT 0x04
110 #define GRGPIO_DIR 0x08
111 #define GRGPIO_IMASK 0x0c
112 #define GRGPIO_IPOL 0x10
113 #define GRGPIO_IEDGE 0x14
114 #define GRGPIO_BYPASS 0x18
115 #define GRGPIO_IMAP_BASE 0x20
116
117 /* Structure for an irq of the core - called an underlying irq */
118 struct grgpio_uirq {
119 u8 refcnt; /* Number of mapped gpio-line irqs using this uirq; the uirq is requested on 0->1 and freed on 1->0 */
120 u8 uirq; /* Underlying irq of the gpio driver */
121 };
122
123 /*
124 * Structure for an irq of a gpio line handed out by this driver. The index is
125 * used to map to the corresponding underlying irq.
126 */
127 struct grgpio_lirq {
128 s8 index; /* Index into struct grgpio_priv's uirqs, or -1 */
129 u8 irq; /* irq for the gpio line; 0 when the line is not mapped */
130 };
131
132 struct grgpio_priv {
133 struct bgpio_chip bgc;
134 void __iomem *regs;
135 struct device *dev;
136
137 u32 imask; /* irq mask shadow register; updated under bgc.lock */
138
139 /*
140 * The grgpio core can have multiple "underlying" irqs. The gpio lines
141 * can be mapped to any one or none of these underlying irqs
142 * independently of each other. This driver sets up an irq domain and
143 * hands out separate irqs to each gpio line
144 */
145 struct irq_domain *domain;
146
147 /*
148 * This array contains information on each underlying irq, each
149 * irq of the grgpio core itself.
150 */
151 struct grgpio_uirq uirqs[GRGPIO_MAX_NGPIO];
152
153 /*
154 * This array contains information for each gpio line on the irqs
155 * obtained from this driver. An index value of -1 for a certain gpio
156 * line indicates that the line has no irq. Otherwise the index connects
157 * the irq to the underlying irq by pointing into the uirqs array.
158 */
159 struct grgpio_lirq lirqs[GRGPIO_MAX_NGPIO];
160 };
161
162 static inline struct grgpio_priv *grgpio_gc_to_priv(struct gpio_chip *gc)
163 {
164 struct bgpio_chip *bgc = to_bgpio_chip(gc);
165
166 return container_of(bgc, struct grgpio_priv, bgc);
167 }
168
169 static void grgpio_set_imask(struct grgpio_priv *priv, unsigned int offset,
170 int val)
171 {
172 struct bgpio_chip *bgc = &priv->bgc;
173 unsigned long mask = bgc->pin2mask(bgc, offset);
174 unsigned long flags;
175
176 spin_lock_irqsave(&bgc->lock, flags);
177
178 if (val)
179 priv->imask |= mask;
180 else
181 priv->imask &= ~mask;
182 bgc->write_reg(priv->regs + GRGPIO_IMASK, priv->imask);
183
184 spin_unlock_irqrestore(&bgc->lock, flags);
185 }
186
187 static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
188 {
189 struct grgpio_priv *priv = grgpio_gc_to_priv(gc);
190
191 if (offset >= gc->ngpio)
192 return -ENXIO;
193
194 if (priv->lirqs[offset].index < 0)
195 return -ENXIO;
196
197 return irq_create_mapping(priv->domain, offset);
198 }
199
200 /* -------------------- IRQ chip functions -------------------- */
201
202 static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)
203 {
204 struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
205 unsigned long flags;
206 u32 mask = BIT(d->hwirq);
207 u32 ipol;
208 u32 iedge;
209 u32 pol;
210 u32 edge;
211
212 switch (type) {
213 case IRQ_TYPE_LEVEL_LOW:
214 pol = 0;
215 edge = 0;
216 break;
217 case IRQ_TYPE_LEVEL_HIGH:
218 pol = mask;
219 edge = 0;
220 break;
221 case IRQ_TYPE_EDGE_FALLING:
222 pol = 0;
223 edge = mask;
224 break;
225 case IRQ_TYPE_EDGE_RISING:
226 pol = mask;
227 edge = mask;
228 break;
229 default:
230 return -EINVAL;
231 }
232
233 spin_lock_irqsave(&priv->bgc.lock, flags);
234
235 ipol = priv->bgc.read_reg(priv->regs + GRGPIO_IPOL) & ~mask;
236 iedge = priv->bgc.read_reg(priv->regs + GRGPIO_IEDGE) & ~mask;
237
238 priv->bgc.write_reg(priv->regs + GRGPIO_IPOL, ipol | pol);
239 priv->bgc.write_reg(priv->regs + GRGPIO_IEDGE, iedge | edge);
240
241 spin_unlock_irqrestore(&priv->bgc.lock, flags);
242
243 return 0;
244 }
245
246 static void grgpio_irq_mask(struct irq_data *d)
247 {
248 struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
249 int offset = d->hwirq;
250
251 grgpio_set_imask(priv, offset, 0);
252 }
253
254 static void grgpio_irq_unmask(struct irq_data *d)
255 {
256 struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
257 int offset = d->hwirq;
258
259 grgpio_set_imask(priv, offset, 1);
260 }
261
/* irq_chip shared by every gpio-line irq handed out by the irq domain */
262 static struct irq_chip grgpio_irq_chip = {
263 .name = "grgpio",
264 .irq_mask = grgpio_irq_mask,
265 .irq_unmask = grgpio_irq_unmask,
266 .irq_set_type = grgpio_irq_set_type,
267 };
268
269 static irqreturn_t grgpio_irq_handler(int irq, void *dev)
270 {
271 struct grgpio_priv *priv = dev;
272 int ngpio = priv->bgc.gc.ngpio;
273 unsigned long flags;
274 int i;
275 int match = 0;
276
277 spin_lock_irqsave(&priv->bgc.lock, flags);
278
279 /*
280 * For each gpio line, call its interrupt handler if it its underlying
281 * irq matches the current irq that is handled.
282 */
283 for (i = 0; i < ngpio; i++) {
284 struct grgpio_lirq *lirq = &priv->lirqs[i];
285
286 if (priv->imask & BIT(i) && lirq->index >= 0 &&
287 priv->uirqs[lirq->index].uirq == irq) {
288 generic_handle_irq(lirq->irq);
289 match = 1;
290 }
291 }
292
293 spin_unlock_irqrestore(&priv->bgc.lock, flags);
294
295 if (!match)
296 dev_warn(priv->dev, "No gpio line matched irq %d\n", irq);
297
298 return IRQ_HANDLED;
299 }
300
301 /*
302 * This function will be called as a consequence of the call to
303 * irq_create_mapping in grgpio_to_irq
304 */
305 static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
306 irq_hw_number_t hwirq)
307 {
308 struct grgpio_priv *priv = d->host_data;
309 struct grgpio_lirq *lirq;
310 struct grgpio_uirq *uirq;
311 unsigned long flags;
312 int offset = hwirq;
313 int ret = 0;
314
315 if (!priv)
316 return -EINVAL;
317
318 lirq = &priv->lirqs[offset];
319 if (lirq->index < 0)
320 return -EINVAL;
321
322 dev_dbg(priv->dev, "Mapping irq %d for gpio line %d\n",
323 irq, offset);
324
325 spin_lock_irqsave(&priv->bgc.lock, flags);
326
327 /* Request underlying irq if not already requested */
328 lirq->irq = irq;
329 uirq = &priv->uirqs[lirq->index];
330 if (uirq->refcnt == 0) {
331 ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
332 dev_name(priv->dev), priv);
333 if (ret) {
334 dev_err(priv->dev,
335 "Could not request underlying irq %d\n",
336 uirq->uirq);
337
338 spin_unlock_irqrestore(&priv->bgc.lock, flags);
339
340 return ret;
341 }
342 }
343 uirq->refcnt++;
344
345 spin_unlock_irqrestore(&priv->bgc.lock, flags);
346
347 /* Setup irq */
348 irq_set_chip_data(irq, priv);
349 irq_set_chip_and_handler(irq, &grgpio_irq_chip,
350 handle_simple_irq);
351 irq_clear_status_flags(irq, IRQ_NOREQUEST);
352 #ifdef CONFIG_ARM
353 set_irq_flags(irq, IRQF_VALID);
354 #else
355 irq_set_noprobe(irq);
356 #endif
357
358 return ret;
359 }
360
361 static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
362 {
363 struct grgpio_priv *priv = d->host_data;
364 int index;
365 struct grgpio_lirq *lirq;
366 struct grgpio_uirq *uirq;
367 unsigned long flags;
368 int ngpio = priv->bgc.gc.ngpio;
369 int i;
370
371 #ifdef CONFIG_ARM
372 set_irq_flags(irq, 0);
373 #endif
374 irq_set_chip_and_handler(irq, NULL, NULL);
375 irq_set_chip_data(irq, NULL);
376
377 spin_lock_irqsave(&priv->bgc.lock, flags);
378
379 /* Free underlying irq if last user unmapped */
380 index = -1;
381 for (i = 0; i < ngpio; i++) {
382 lirq = &priv->lirqs[i];
383 if (lirq->irq == irq) {
384 grgpio_set_imask(priv, i, 0);
385 lirq->irq = 0;
386 index = lirq->index;
387 break;
388 }
389 }
390 WARN_ON(index < 0);
391
392 if (index >= 0) {
393 uirq = &priv->uirqs[lirq->index];
394 uirq->refcnt--;
395 if (uirq->refcnt == 0)
396 free_irq(uirq->uirq, priv);
397 }
398
399 spin_unlock_irqrestore(&priv->bgc.lock, flags);
400 }
401
/* irq_domain callbacks: lazily request/free the underlying core irqs */
402 static const struct irq_domain_ops grgpio_irq_domain_ops = {
403 .map = grgpio_irq_map,
404 .unmap = grgpio_irq_unmap,
405 };
406
407 /* ------------------------------------------------------------ */
408
/*
 * Platform probe: map the register bank, initialize the generic
 * basic-mmio gpio chip, read the optional "nbits" and "irqmap"
 * device-tree properties, set up the per-line irq domain and register
 * the gpiochip. Returns 0 on success or a negative errno.
 */
409 static int grgpio_probe(struct platform_device *ofdev)
410 {
411 struct device_node *np = ofdev->dev.of_node;
412 void __iomem *regs;
413 struct gpio_chip *gc;
414 struct bgpio_chip *bgc;
415 struct grgpio_priv *priv;
416 struct resource *res;
417 int err;
418 u32 prop;
419 s32 *irqmap;
420 int size;
421 int i;
422
423 priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL);
424 if (!priv)
425 return -ENOMEM;
426
427 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
428 regs = devm_ioremap_resource(&ofdev->dev, res);
429 if (IS_ERR(regs))
430 return PTR_ERR(regs);
431
/* bgpio_init() supplies the read_reg/write_reg/pin2mask accessors */
432 bgc = &priv->bgc;
433 err = bgpio_init(bgc, &ofdev->dev, 4, regs + GRGPIO_DATA,
434 regs + GRGPIO_OUTPUT, NULL, regs + GRGPIO_DIR, NULL,
435 BGPIOF_BIG_ENDIAN_BYTE_ORDER);
436 if (err) {
437 dev_err(&ofdev->dev, "bgpio_init() failed\n");
438 return err;
439 }
440
/* Seed the imask shadow from the hardware's current IMASK value */
441 priv->regs = regs;
442 priv->imask = bgc->read_reg(regs + GRGPIO_IMASK);
443 priv->dev = &ofdev->dev;
444
445 gc = &bgc->gc;
446 gc->of_node = np;
447 gc->owner = THIS_MODULE;
448 gc->to_irq = grgpio_to_irq;
449 gc->label = np->full_name;
450 gc->base = -1; /* let gpiolib pick a dynamic base */
451
452 err = of_property_read_u32(np, "nbits", &prop);
453 if (err || prop <= 0 || prop > GRGPIO_MAX_NGPIO) {
454 gc->ngpio = GRGPIO_MAX_NGPIO;
455 dev_dbg(&ofdev->dev,
456 "No or invalid nbits property: assume %d\n", gc->ngpio);
457 } else {
458 gc->ngpio = prop;
459 }
460
461 /*
462 * The irqmap contains the index values indicating which underlying irq,
463 * if any, is connected to that line
464 */
465 irqmap = (s32 *)of_get_property(np, "irqmap", &size);
466 if (irqmap) {
467 if (size < gc->ngpio) {
468 dev_err(&ofdev->dev,
469 "irqmap shorter than ngpio (%d < %d)\n",
470 size, gc->ngpio);
471 return -EINVAL;
472 }
473
474 priv->domain = irq_domain_add_linear(np, gc->ngpio,
475 &grgpio_irq_domain_ops,
476 priv);
477 if (!priv->domain) {
478 dev_err(&ofdev->dev, "Could not add irq domain\n");
479 return -EINVAL;
480 }
481
/* Resolve each line's underlying irq number via platform_get_irq() */
482 for (i = 0; i < gc->ngpio; i++) {
483 struct grgpio_lirq *lirq;
484 int ret;
485
486 lirq = &priv->lirqs[i];
487 lirq->index = irqmap[i];
488
489 if (lirq->index < 0)
490 continue;
491
492 ret = platform_get_irq(ofdev, lirq->index);
493 if (ret <= 0) {
494 /*
495 * Continue without irq functionality for that
496 * gpio line
497 */
498 dev_err(priv->dev,
499 "Failed to get irq for offset %d\n", i);
500 continue;
501 }
502 priv->uirqs[lirq->index].uirq = ret;
503 }
504 }
505
506 platform_set_drvdata(ofdev, priv);
507
508 err = gpiochip_add(gc);
509 if (err) {
510 dev_err(&ofdev->dev, "Could not add gpiochip\n");
/* Undo the irq domain on failure; devm handles priv and regs */
511 if (priv->domain)
512 irq_domain_remove(priv->domain);
513 return err;
514 }
515
516 dev_info(&ofdev->dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
517 priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off");
518
519 return 0;
520 }
521
522 static int grgpio_remove(struct platform_device *ofdev)
523 {
524 struct grgpio_priv *priv = platform_get_drvdata(ofdev);
525 unsigned long flags;
526 int i;
527 int ret = 0;
528
529 spin_lock_irqsave(&priv->bgc.lock, flags);
530
531 if (priv->domain) {
532 for (i = 0; i < GRGPIO_MAX_NGPIO; i++) {
533 if (priv->uirqs[i].refcnt != 0) {
534 ret = -EBUSY;
535 goto out;
536 }
537 }
538 }
539
540 gpiochip_remove(&priv->bgc.gc);
541
542 if (priv->domain)
543 irq_domain_remove(priv->domain);
544
545 out:
546 spin_unlock_irqrestore(&priv->bgc.lock, flags);
547
548 return ret;
549 }
550
/* Device-tree match table for the GRGPIO core (matched on node name) */
551 static const struct of_device_id grgpio_match[] = {
552 {.name = "GAISLER_GPIO"},
553 {.name = "01_01a"},
554 {},
555 };
556
557 MODULE_DEVICE_TABLE(of, grgpio_match);
558
/* Platform driver glue; probe/remove are defined above */
559 static struct platform_driver grgpio_driver = {
560 .driver = {
561 .name = "grgpio",
562 .of_match_table = grgpio_match,
563 },
564 .probe = grgpio_probe,
565 .remove = grgpio_remove,
566 };
567 module_platform_driver(grgpio_driver);
568
569 MODULE_AUTHOR("Aeroflex Gaisler AB.");
570 MODULE_DESCRIPTION("Driver for Aeroflex Gaisler GRGPIO");
571 MODULE_LICENSE("GPL");
572
573
574
575
576
577 /* LDV_COMMENT_BEGIN_MAIN */
578 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
579
580 /*###########################################################################*/
581
582 /*############## Driver Environment Generator 0.2 output ####################*/
583
584 /*###########################################################################*/
585
586
587
588 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
589 void ldv_check_final_state(void);
590
591 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
592 void ldv_check_return_value(int res);
593
594 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
595 void ldv_check_return_value_probe(int res);
596
597 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
598 void ldv_initialize(void);
599
600 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
601 void ldv_handler_precall(void);
602
603 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
604 int nondet_int(void);
605
606 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
607 int LDV_IN_INTERRUPT;
608
609 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
610 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
611
612
613
614 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
615 /*============================= VARIABLE DECLARATION PART =============================*/
616 /** STRUCT: struct type: irq_chip, struct name: grgpio_irq_chip **/
617 /* content: static void grgpio_irq_mask(struct irq_data *d)*/
618 /* LDV_COMMENT_BEGIN_PREP */
619 #define GRGPIO_MAX_NGPIO 32
620 #define GRGPIO_DATA 0x00
621 #define GRGPIO_OUTPUT 0x04
622 #define GRGPIO_DIR 0x08
623 #define GRGPIO_IMASK 0x0c
624 #define GRGPIO_IPOL 0x10
625 #define GRGPIO_IEDGE 0x14
626 #define GRGPIO_BYPASS 0x18
627 #define GRGPIO_IMAP_BASE 0x20
628 /* LDV_COMMENT_END_PREP */
629 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_mask" */
630 struct irq_data * var_group1;
631 /* LDV_COMMENT_BEGIN_PREP */
632 #ifdef CONFIG_ARM
633 #else
634 #endif
635 #ifdef CONFIG_ARM
636 #endif
637 /* LDV_COMMENT_END_PREP */
638 /* content: static void grgpio_irq_unmask(struct irq_data *d)*/
639 /* LDV_COMMENT_BEGIN_PREP */
640 #define GRGPIO_MAX_NGPIO 32
641 #define GRGPIO_DATA 0x00
642 #define GRGPIO_OUTPUT 0x04
643 #define GRGPIO_DIR 0x08
644 #define GRGPIO_IMASK 0x0c
645 #define GRGPIO_IPOL 0x10
646 #define GRGPIO_IEDGE 0x14
647 #define GRGPIO_BYPASS 0x18
648 #define GRGPIO_IMAP_BASE 0x20
649 /* LDV_COMMENT_END_PREP */
650 /* LDV_COMMENT_BEGIN_PREP */
651 #ifdef CONFIG_ARM
652 #else
653 #endif
654 #ifdef CONFIG_ARM
655 #endif
656 /* LDV_COMMENT_END_PREP */
657 /* content: static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)*/
658 /* LDV_COMMENT_BEGIN_PREP */
659 #define GRGPIO_MAX_NGPIO 32
660 #define GRGPIO_DATA 0x00
661 #define GRGPIO_OUTPUT 0x04
662 #define GRGPIO_DIR 0x08
663 #define GRGPIO_IMASK 0x0c
664 #define GRGPIO_IPOL 0x10
665 #define GRGPIO_IEDGE 0x14
666 #define GRGPIO_BYPASS 0x18
667 #define GRGPIO_IMAP_BASE 0x20
668 /* LDV_COMMENT_END_PREP */
669 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_set_type" */
670 unsigned int var_grgpio_irq_set_type_3_p1;
671 /* LDV_COMMENT_BEGIN_PREP */
672 #ifdef CONFIG_ARM
673 #else
674 #endif
675 #ifdef CONFIG_ARM
676 #endif
677 /* LDV_COMMENT_END_PREP */
678
679 /** STRUCT: struct type: irq_domain_ops, struct name: grgpio_irq_domain_ops **/
680 /* content: static int grgpio_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq)*/
681 /* LDV_COMMENT_BEGIN_PREP */
682 #define GRGPIO_MAX_NGPIO 32
683 #define GRGPIO_DATA 0x00
684 #define GRGPIO_OUTPUT 0x04
685 #define GRGPIO_DIR 0x08
686 #define GRGPIO_IMASK 0x0c
687 #define GRGPIO_IPOL 0x10
688 #define GRGPIO_IEDGE 0x14
689 #define GRGPIO_BYPASS 0x18
690 #define GRGPIO_IMAP_BASE 0x20
691 /* LDV_COMMENT_END_PREP */
692 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_map" */
693 struct irq_domain * var_group2;
694 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_map" */
695 unsigned int var_grgpio_irq_map_7_p1;
696 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_map" */
697 irq_hw_number_t var_grgpio_irq_map_7_p2;
698 /* LDV_COMMENT_BEGIN_PREP */
699 #ifdef CONFIG_ARM
700 #endif
701 /* LDV_COMMENT_END_PREP */
702 /* content: static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)*/
703 /* LDV_COMMENT_BEGIN_PREP */
704 #define GRGPIO_MAX_NGPIO 32
705 #define GRGPIO_DATA 0x00
706 #define GRGPIO_OUTPUT 0x04
707 #define GRGPIO_DIR 0x08
708 #define GRGPIO_IMASK 0x0c
709 #define GRGPIO_IPOL 0x10
710 #define GRGPIO_IEDGE 0x14
711 #define GRGPIO_BYPASS 0x18
712 #define GRGPIO_IMAP_BASE 0x20
713 #ifdef CONFIG_ARM
714 #else
715 #endif
716 /* LDV_COMMENT_END_PREP */
717 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_unmap" */
718 unsigned int var_grgpio_irq_unmap_8_p1;
719
720 /** STRUCT: struct type: platform_driver, struct name: grgpio_driver **/
721 /* content: static int grgpio_probe(struct platform_device *ofdev)*/
722 /* LDV_COMMENT_BEGIN_PREP */
723 #define GRGPIO_MAX_NGPIO 32
724 #define GRGPIO_DATA 0x00
725 #define GRGPIO_OUTPUT 0x04
726 #define GRGPIO_DIR 0x08
727 #define GRGPIO_IMASK 0x0c
728 #define GRGPIO_IPOL 0x10
729 #define GRGPIO_IEDGE 0x14
730 #define GRGPIO_BYPASS 0x18
731 #define GRGPIO_IMAP_BASE 0x20
732 #ifdef CONFIG_ARM
733 #else
734 #endif
735 #ifdef CONFIG_ARM
736 #endif
737 /* LDV_COMMENT_END_PREP */
738 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_probe" */
739 struct platform_device * var_group3;
740 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "grgpio_probe" */
741 static int res_grgpio_probe_9;
742 /* content: static int grgpio_remove(struct platform_device *ofdev)*/
743 /* LDV_COMMENT_BEGIN_PREP */
744 #define GRGPIO_MAX_NGPIO 32
745 #define GRGPIO_DATA 0x00
746 #define GRGPIO_OUTPUT 0x04
747 #define GRGPIO_DIR 0x08
748 #define GRGPIO_IMASK 0x0c
749 #define GRGPIO_IPOL 0x10
750 #define GRGPIO_IEDGE 0x14
751 #define GRGPIO_BYPASS 0x18
752 #define GRGPIO_IMAP_BASE 0x20
753 #ifdef CONFIG_ARM
754 #else
755 #endif
756 #ifdef CONFIG_ARM
757 #endif
758 /* LDV_COMMENT_END_PREP */
759
760 /** CALLBACK SECTION request_irq **/
761 /* content: static irqreturn_t grgpio_irq_handler(int irq, void *dev)*/
762 /* LDV_COMMENT_BEGIN_PREP */
763 #define GRGPIO_MAX_NGPIO 32
764 #define GRGPIO_DATA 0x00
765 #define GRGPIO_OUTPUT 0x04
766 #define GRGPIO_DIR 0x08
767 #define GRGPIO_IMASK 0x0c
768 #define GRGPIO_IPOL 0x10
769 #define GRGPIO_IEDGE 0x14
770 #define GRGPIO_BYPASS 0x18
771 #define GRGPIO_IMAP_BASE 0x20
772 /* LDV_COMMENT_END_PREP */
773 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_handler" */
774 int var_grgpio_irq_handler_6_p0;
775 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "grgpio_irq_handler" */
776 void * var_grgpio_irq_handler_6_p1;
777 /* LDV_COMMENT_BEGIN_PREP */
778 #ifdef CONFIG_ARM
779 #else
780 #endif
781 #ifdef CONFIG_ARM
782 #endif
783 /* LDV_COMMENT_END_PREP */
784
785
786
787
788 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
789 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
790 /*============================= VARIABLE INITIALIZING PART =============================*/
791 LDV_IN_INTERRUPT=1;
792
793
794
795
796 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
797 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
798 /*============================= FUNCTION CALL SECTION =============================*/
799 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
800 ldv_initialize();
801
802
803
804
805 int ldv_s_grgpio_driver_platform_driver = 0;
806
807
808
809
810 while( nondet_int()
811 || !(ldv_s_grgpio_driver_platform_driver == 0)
812 ) {
813
814 switch(nondet_int()) {
815
816 case 0: {
817
818 /** STRUCT: struct type: irq_chip, struct name: grgpio_irq_chip **/
819
820
821 /* content: static void grgpio_irq_mask(struct irq_data *d)*/
822 /* LDV_COMMENT_BEGIN_PREP */
823 #define GRGPIO_MAX_NGPIO 32
824 #define GRGPIO_DATA 0x00
825 #define GRGPIO_OUTPUT 0x04
826 #define GRGPIO_DIR 0x08
827 #define GRGPIO_IMASK 0x0c
828 #define GRGPIO_IPOL 0x10
829 #define GRGPIO_IEDGE 0x14
830 #define GRGPIO_BYPASS 0x18
831 #define GRGPIO_IMAP_BASE 0x20
832 /* LDV_COMMENT_END_PREP */
833 /* LDV_COMMENT_FUNCTION_CALL Function from field "irq_mask" from driver structure with callbacks "grgpio_irq_chip" */
834 ldv_handler_precall();
835 grgpio_irq_mask( var_group1);
836 /* LDV_COMMENT_BEGIN_PREP */
837 #ifdef CONFIG_ARM
838 #else
839 #endif
840 #ifdef CONFIG_ARM
841 #endif
842 /* LDV_COMMENT_END_PREP */
843
844
845
846
847 }
848
849 break;
850 case 1: {
851
852 /** STRUCT: struct type: irq_chip, struct name: grgpio_irq_chip **/
853
854
855 /* content: static void grgpio_irq_unmask(struct irq_data *d)*/
856 /* LDV_COMMENT_BEGIN_PREP */
857 #define GRGPIO_MAX_NGPIO 32
858 #define GRGPIO_DATA 0x00
859 #define GRGPIO_OUTPUT 0x04
860 #define GRGPIO_DIR 0x08
861 #define GRGPIO_IMASK 0x0c
862 #define GRGPIO_IPOL 0x10
863 #define GRGPIO_IEDGE 0x14
864 #define GRGPIO_BYPASS 0x18
865 #define GRGPIO_IMAP_BASE 0x20
866 /* LDV_COMMENT_END_PREP */
867 /* LDV_COMMENT_FUNCTION_CALL Function from field "irq_unmask" from driver structure with callbacks "grgpio_irq_chip" */
868 ldv_handler_precall();
869 grgpio_irq_unmask( var_group1);
870 /* LDV_COMMENT_BEGIN_PREP */
871 #ifdef CONFIG_ARM
872 #else
873 #endif
874 #ifdef CONFIG_ARM
875 #endif
876 /* LDV_COMMENT_END_PREP */
877
878
879
880
881 }
882
883 break;
884 case 2: {
885
886 /** STRUCT: struct type: irq_chip, struct name: grgpio_irq_chip **/
887
888
889 /* content: static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)*/
890 /* LDV_COMMENT_BEGIN_PREP */
891 #define GRGPIO_MAX_NGPIO 32
892 #define GRGPIO_DATA 0x00
893 #define GRGPIO_OUTPUT 0x04
894 #define GRGPIO_DIR 0x08
895 #define GRGPIO_IMASK 0x0c
896 #define GRGPIO_IPOL 0x10
897 #define GRGPIO_IEDGE 0x14
898 #define GRGPIO_BYPASS 0x18
899 #define GRGPIO_IMAP_BASE 0x20
900 /* LDV_COMMENT_END_PREP */
901 /* LDV_COMMENT_FUNCTION_CALL Function from field "irq_set_type" from driver structure with callbacks "grgpio_irq_chip" */
902 ldv_handler_precall();
903 grgpio_irq_set_type( var_group1, var_grgpio_irq_set_type_3_p1);
904 /* LDV_COMMENT_BEGIN_PREP */
905 #ifdef CONFIG_ARM
906 #else
907 #endif
908 #ifdef CONFIG_ARM
909 #endif
910 /* LDV_COMMENT_END_PREP */
911
912
913
914
915 }
916
917 break;
918 case 3: {
919
920 /** STRUCT: struct type: irq_domain_ops, struct name: grgpio_irq_domain_ops **/
921
922
923 /* content: static int grgpio_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq)*/
924 /* LDV_COMMENT_BEGIN_PREP */
925 #define GRGPIO_MAX_NGPIO 32
926 #define GRGPIO_DATA 0x00
927 #define GRGPIO_OUTPUT 0x04
928 #define GRGPIO_DIR 0x08
929 #define GRGPIO_IMASK 0x0c
930 #define GRGPIO_IPOL 0x10
931 #define GRGPIO_IEDGE 0x14
932 #define GRGPIO_BYPASS 0x18
933 #define GRGPIO_IMAP_BASE 0x20
934 /* LDV_COMMENT_END_PREP */
935 /* LDV_COMMENT_FUNCTION_CALL Function from field "map" from driver structure with callbacks "grgpio_irq_domain_ops" */
936 ldv_handler_precall();
937 grgpio_irq_map( var_group2, var_grgpio_irq_map_7_p1, var_grgpio_irq_map_7_p2);
938 /* LDV_COMMENT_BEGIN_PREP */
939 #ifdef CONFIG_ARM
940 #endif
941 /* LDV_COMMENT_END_PREP */
942
943
944
945
946 }
947
948 break;
949 case 4: {
950
951 /** STRUCT: struct type: irq_domain_ops, struct name: grgpio_irq_domain_ops **/
952
953
954 /* content: static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)*/
955 /* LDV_COMMENT_BEGIN_PREP */
956 #define GRGPIO_MAX_NGPIO 32
957 #define GRGPIO_DATA 0x00
958 #define GRGPIO_OUTPUT 0x04
959 #define GRGPIO_DIR 0x08
960 #define GRGPIO_IMASK 0x0c
961 #define GRGPIO_IPOL 0x10
962 #define GRGPIO_IEDGE 0x14
963 #define GRGPIO_BYPASS 0x18
964 #define GRGPIO_IMAP_BASE 0x20
965 #ifdef CONFIG_ARM
966 #else
967 #endif
968 /* LDV_COMMENT_END_PREP */
969 /* LDV_COMMENT_FUNCTION_CALL Function from field "unmap" from driver structure with callbacks "grgpio_irq_domain_ops" */
970 ldv_handler_precall();
971 grgpio_irq_unmap( var_group2, var_grgpio_irq_unmap_8_p1);
972
973
974
975
976 }
977
978 break;
979 case 5: {
980
981 /** STRUCT: struct type: platform_driver, struct name: grgpio_driver **/
982 if(ldv_s_grgpio_driver_platform_driver==0) {
983
984 /* content: static int grgpio_probe(struct platform_device *ofdev)*/
985 /* LDV_COMMENT_BEGIN_PREP */
986 #define GRGPIO_MAX_NGPIO 32
987 #define GRGPIO_DATA 0x00
988 #define GRGPIO_OUTPUT 0x04
989 #define GRGPIO_DIR 0x08
990 #define GRGPIO_IMASK 0x0c
991 #define GRGPIO_IPOL 0x10
992 #define GRGPIO_IEDGE 0x14
993 #define GRGPIO_BYPASS 0x18
994 #define GRGPIO_IMAP_BASE 0x20
995 #ifdef CONFIG_ARM
996 #else
997 #endif
998 #ifdef CONFIG_ARM
999 #endif
1000 /* LDV_COMMENT_END_PREP */
1001 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "grgpio_driver". Standart function test for correct return result. */
1002 res_grgpio_probe_9 = grgpio_probe( var_group3);
1003 ldv_check_return_value(res_grgpio_probe_9);
1004 ldv_check_return_value_probe(res_grgpio_probe_9);
1005 if(res_grgpio_probe_9)
1006 goto ldv_module_exit;
1007 ldv_s_grgpio_driver_platform_driver++;
1008
1009 }
1010
1011 }
1012
1013 break;
1014 case 6: {
1015
1016 /** STRUCT: struct type: platform_driver, struct name: grgpio_driver **/
1017 if(ldv_s_grgpio_driver_platform_driver==1) {
1018
1019 /* content: static int grgpio_remove(struct platform_device *ofdev)*/
1020 /* LDV_COMMENT_BEGIN_PREP */
1021 #define GRGPIO_MAX_NGPIO 32
1022 #define GRGPIO_DATA 0x00
1023 #define GRGPIO_OUTPUT 0x04
1024 #define GRGPIO_DIR 0x08
1025 #define GRGPIO_IMASK 0x0c
1026 #define GRGPIO_IPOL 0x10
1027 #define GRGPIO_IEDGE 0x14
1028 #define GRGPIO_BYPASS 0x18
1029 #define GRGPIO_IMAP_BASE 0x20
1030 #ifdef CONFIG_ARM
1031 #else
1032 #endif
1033 #ifdef CONFIG_ARM
1034 #endif
1035 /* LDV_COMMENT_END_PREP */
1036 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "grgpio_driver" */
1037 ldv_handler_precall();
1038 grgpio_remove( var_group3);
1039 ldv_s_grgpio_driver_platform_driver=0;
1040
1041 }
1042
1043 }
1044
1045 break;
1046 case 7: {
1047
1048 /** CALLBACK SECTION request_irq **/
1049 LDV_IN_INTERRUPT=2;
1050
1051 /* content: static irqreturn_t grgpio_irq_handler(int irq, void *dev)*/
1052 /* LDV_COMMENT_BEGIN_PREP */
1053 #define GRGPIO_MAX_NGPIO 32
1054 #define GRGPIO_DATA 0x00
1055 #define GRGPIO_OUTPUT 0x04
1056 #define GRGPIO_DIR 0x08
1057 #define GRGPIO_IMASK 0x0c
1058 #define GRGPIO_IPOL 0x10
1059 #define GRGPIO_IEDGE 0x14
1060 #define GRGPIO_BYPASS 0x18
1061 #define GRGPIO_IMAP_BASE 0x20
1062 /* LDV_COMMENT_END_PREP */
1063 /* LDV_COMMENT_FUNCTION_CALL */
1064 ldv_handler_precall();
1065 grgpio_irq_handler( var_grgpio_irq_handler_6_p0, var_grgpio_irq_handler_6_p1);
1066 /* LDV_COMMENT_BEGIN_PREP */
1067 #ifdef CONFIG_ARM
1068 #else
1069 #endif
1070 #ifdef CONFIG_ARM
1071 #endif
1072 /* LDV_COMMENT_END_PREP */
1073 LDV_IN_INTERRUPT=1;
1074
1075
1076
1077 }
1078
1079 break;
1080 default: break;
1081
1082 }
1083
1084 }
1085
1086 ldv_module_exit:
1087
1088 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
1089 ldv_final: ldv_check_final_state();
1090
1091 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
1092 return;
1093
1094 }
1095 #endif
1096
1097 /* LDV_COMMENT_END_MAIN */
1098
1099 #line 66 "/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.2-rc1.tar.xz--X--39_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/1296/dscv_tempdir/dscv/ri/39_7a/drivers/gpio/gpio-grgpio.o.c.prepared" 1
2
3 #include <linux/kernel.h>
4 #include <linux/spinlock.h>
5
6 #include <verifier/rcv.h>
7 #include <kernel-model/ERR.inc>
8
/* Model state for spin 'alloc_lock_of_task_struct': 1 = unlocked, 2 = locked. */
static int ldv_spin_alloc_lock_of_task_struct = 1;

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_alloc_lock_of_task_struct') Check that spin 'alloc_lock_of_task_struct' was not locked and lock it */
void ldv_spin_lock_alloc_lock_of_task_struct(void)
{
    /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be unlocked */
    ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);
    /* LDV_COMMENT_CHANGE_STATE Lock spin 'alloc_lock_of_task_struct' */
    ldv_spin_alloc_lock_of_task_struct = 2;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_alloc_lock_of_task_struct') Check that spin 'alloc_lock_of_task_struct' was locked and unlock it */
void ldv_spin_unlock_alloc_lock_of_task_struct(void)
{
    /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be locked */
    ldv_assert(ldv_spin_alloc_lock_of_task_struct == 2);
    /* LDV_COMMENT_CHANGE_STATE Unlock spin 'alloc_lock_of_task_struct' */
    ldv_spin_alloc_lock_of_task_struct = 1;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_alloc_lock_of_task_struct') Check that spin 'alloc_lock_of_task_struct' was not locked and nondeterministically lock it. Return 0 on fails */
int ldv_spin_trylock_alloc_lock_of_task_struct(void)
{
    int held_by_other;

    /* LDV_COMMENT_ASSERT It may be an error if spin 'alloc_lock_of_task_struct' is locked at this point */
    ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);

    /* LDV_COMMENT_OTHER Construct nondetermined result */
    held_by_other = ldv_undef_int();

    /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'alloc_lock_of_task_struct' */
    if (!held_by_other)
    {
        /* LDV_COMMENT_CHANGE_STATE Lock spin 'alloc_lock_of_task_struct' */
        ldv_spin_alloc_lock_of_task_struct = 2;
        /* LDV_COMMENT_RETURN Finish with success */
        return 1;
    }
    /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' was not locked. Finish with fail */
    return 0;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_alloc_lock_of_task_struct') The same process can not both lock spin 'alloc_lock_of_task_struct' and wait until it will be unlocked */
void ldv_spin_unlock_wait_alloc_lock_of_task_struct(void)
{
    /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must not be locked by a current process */
    ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_alloc_lock_of_task_struct') Check whether spin 'alloc_lock_of_task_struct' was locked */
int ldv_spin_is_locked_alloc_lock_of_task_struct(void)
{
    int held_by_other;

    /* LDV_COMMENT_OTHER Construct nondetermined result */
    held_by_other = ldv_undef_int();

    /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'alloc_lock_of_task_struct' was locked */
    if (ldv_spin_alloc_lock_of_task_struct != 1 || held_by_other)
    {
        /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' was locked */
        return 1;
    }
    /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' was unlocked */
    return 0;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_alloc_lock_of_task_struct') Check whether spin 'alloc_lock_of_task_struct' can be locked */
int ldv_spin_can_lock_alloc_lock_of_task_struct(void)
{
    /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
    return ldv_spin_is_locked_alloc_lock_of_task_struct() ? 0 : 1;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_alloc_lock_of_task_struct') Check whether spin 'alloc_lock_of_task_struct' is contended */
int ldv_spin_is_contended_alloc_lock_of_task_struct(void)
{
    /* LDV_COMMENT_OTHER Construct nondetermined result */
    /* LDV_COMMENT_RETURN Nondeterministically report spin 'alloc_lock_of_task_struct' as contended (0) or not (1) */
    return ldv_undef_int() ? 0 : 1;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_alloc_lock_of_task_struct') Lock spin 'alloc_lock_of_task_struct' if atomic decrement result is zero */
int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct(void)
{
    int value_after_dec;

    /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be unlocked (since we may lock it in this function) */
    ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);

    /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
    value_after_dec = ldv_undef_int();

    /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
    if (value_after_dec == 0)
    {
        /* LDV_COMMENT_CHANGE_STATE Lock spin 'alloc_lock_of_task_struct', as atomic has decremented to zero */
        ldv_spin_alloc_lock_of_task_struct = 2;
        /* LDV_COMMENT_RETURN Return 1 with locked spin 'alloc_lock_of_task_struct' */
        return 1;
    }

    /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'alloc_lock_of_task_struct' */
    return 0;
}
/* Model state for spin 'lock': 1 = unlocked, 2 = locked. */
static int ldv_spin_lock = 1;

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_lock') Check that spin 'lock' was not locked and lock it */
void ldv_spin_lock_lock(void)
{
    /* LDV_COMMENT_ASSERT Spin 'lock' must be unlocked */
    ldv_assert(ldv_spin_lock == 1);
    /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock' */
    ldv_spin_lock = 2;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_lock') Check that spin 'lock' was locked and unlock it */
void ldv_spin_unlock_lock(void)
{
    /* LDV_COMMENT_ASSERT Spin 'lock' must be locked */
    ldv_assert(ldv_spin_lock == 2);
    /* LDV_COMMENT_CHANGE_STATE Unlock spin 'lock' */
    ldv_spin_lock = 1;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_lock') Check that spin 'lock' was not locked and nondeterministically lock it. Return 0 on fails */
int ldv_spin_trylock_lock(void)
{
    int held_by_other;

    /* LDV_COMMENT_ASSERT It may be an error if spin 'lock' is locked at this point */
    ldv_assert(ldv_spin_lock == 1);

    /* LDV_COMMENT_OTHER Construct nondetermined result */
    held_by_other = ldv_undef_int();

    /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'lock' */
    if (!held_by_other)
    {
        /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock' */
        ldv_spin_lock = 2;
        /* LDV_COMMENT_RETURN Finish with success */
        return 1;
    }
    /* LDV_COMMENT_RETURN Spin 'lock' was not locked. Finish with fail */
    return 0;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_lock') The same process can not both lock spin 'lock' and wait until it will be unlocked */
void ldv_spin_unlock_wait_lock(void)
{
    /* LDV_COMMENT_ASSERT Spin 'lock' must not be locked by a current process */
    ldv_assert(ldv_spin_lock == 1);
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_lock') Check whether spin 'lock' was locked */
int ldv_spin_is_locked_lock(void)
{
    int held_by_other;

    /* LDV_COMMENT_OTHER Construct nondetermined result */
    held_by_other = ldv_undef_int();

    /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'lock' was locked */
    if (ldv_spin_lock != 1 || held_by_other)
    {
        /* LDV_COMMENT_RETURN Spin 'lock' was locked */
        return 1;
    }
    /* LDV_COMMENT_RETURN Spin 'lock' was unlocked */
    return 0;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_lock') Check whether spin 'lock' can be locked */
int ldv_spin_can_lock_lock(void)
{
    /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
    return ldv_spin_is_locked_lock() ? 0 : 1;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_lock') Check whether spin 'lock' is contended */
int ldv_spin_is_contended_lock(void)
{
    /* LDV_COMMENT_OTHER Construct nondetermined result */
    /* LDV_COMMENT_RETURN Nondeterministically report spin 'lock' as contended (0) or not (1) */
    return ldv_undef_int() ? 0 : 1;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_lock') Lock spin 'lock' if atomic decrement result is zero */
int ldv_atomic_dec_and_lock_lock(void)
{
    int value_after_dec;

    /* LDV_COMMENT_ASSERT Spin 'lock' must be unlocked (since we may lock it in this function) */
    ldv_assert(ldv_spin_lock == 1);

    /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
    value_after_dec = ldv_undef_int();

    /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
    if (value_after_dec == 0)
    {
        /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock', as atomic has decremented to zero */
        ldv_spin_lock = 2;
        /* LDV_COMMENT_RETURN Return 1 with locked spin 'lock' */
        return 1;
    }

    /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'lock' */
    return 0;
}
/* Model state for spin 'lock_of_NOT_ARG_SIGN': 1 = unlocked, 2 = locked. */
static int ldv_spin_lock_of_NOT_ARG_SIGN = 1;

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_lock_of_NOT_ARG_SIGN') Check that spin 'lock_of_NOT_ARG_SIGN' was not locked and lock it */
void ldv_spin_lock_lock_of_NOT_ARG_SIGN(void)
{
    /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be unlocked */
    ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
    /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_NOT_ARG_SIGN' */
    ldv_spin_lock_of_NOT_ARG_SIGN = 2;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_lock_of_NOT_ARG_SIGN') Check that spin 'lock_of_NOT_ARG_SIGN' was locked and unlock it */
void ldv_spin_unlock_lock_of_NOT_ARG_SIGN(void)
{
    /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be locked */
    ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 2);
    /* LDV_COMMENT_CHANGE_STATE Unlock spin 'lock_of_NOT_ARG_SIGN' */
    ldv_spin_lock_of_NOT_ARG_SIGN = 1;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_lock_of_NOT_ARG_SIGN') Check that spin 'lock_of_NOT_ARG_SIGN' was not locked and nondeterministically lock it. Return 0 on fails */
int ldv_spin_trylock_lock_of_NOT_ARG_SIGN(void)
{
    int held_by_other;

    /* LDV_COMMENT_ASSERT It may be an error if spin 'lock_of_NOT_ARG_SIGN' is locked at this point */
    ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);

    /* LDV_COMMENT_OTHER Construct nondetermined result */
    held_by_other = ldv_undef_int();

    /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'lock_of_NOT_ARG_SIGN' */
    if (!held_by_other)
    {
        /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_NOT_ARG_SIGN' */
        ldv_spin_lock_of_NOT_ARG_SIGN = 2;
        /* LDV_COMMENT_RETURN Finish with success */
        return 1;
    }
    /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' was not locked. Finish with fail */
    return 0;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN') The same process can not both lock spin 'lock_of_NOT_ARG_SIGN' and wait until it will be unlocked */
void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN(void)
{
    /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must not be locked by a current process */
    ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_lock_of_NOT_ARG_SIGN') Check whether spin 'lock_of_NOT_ARG_SIGN' was locked */
int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN(void)
{
    int held_by_other;

    /* LDV_COMMENT_OTHER Construct nondetermined result */
    held_by_other = ldv_undef_int();

    /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'lock_of_NOT_ARG_SIGN' was locked */
    if (ldv_spin_lock_of_NOT_ARG_SIGN != 1 || held_by_other)
    {
        /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' was locked */
        return 1;
    }
    /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' was unlocked */
    return 0;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_lock_of_NOT_ARG_SIGN') Check whether spin 'lock_of_NOT_ARG_SIGN' can be locked */
int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN(void)
{
    /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
    return ldv_spin_is_locked_lock_of_NOT_ARG_SIGN() ? 0 : 1;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_lock_of_NOT_ARG_SIGN') Check whether spin 'lock_of_NOT_ARG_SIGN' is contended */
int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN(void)
{
    /* LDV_COMMENT_OTHER Construct nondetermined result */
    /* LDV_COMMENT_RETURN Nondeterministically report spin 'lock_of_NOT_ARG_SIGN' as contended (0) or not (1) */
    return ldv_undef_int() ? 0 : 1;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN') Lock spin 'lock_of_NOT_ARG_SIGN' if atomic decrement result is zero */
int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN(void)
{
    int value_after_dec;

    /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be unlocked (since we may lock it in this function) */
    ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);

    /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
    value_after_dec = ldv_undef_int();

    /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
    if (value_after_dec == 0)
    {
        /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_NOT_ARG_SIGN', as atomic has decremented to zero */
        ldv_spin_lock_of_NOT_ARG_SIGN = 2;
        /* LDV_COMMENT_RETURN Return 1 with locked spin 'lock_of_NOT_ARG_SIGN' */
        return 1;
    }

    /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'lock_of_NOT_ARG_SIGN' */
    return 0;
}
/* Model state for spin 'lock_of_bgpio_chip': 1 = unlocked, 2 = locked. */
static int ldv_spin_lock_of_bgpio_chip = 1;

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_lock_of_bgpio_chip') Check that spin 'lock_of_bgpio_chip' was not locked and lock it */
void ldv_spin_lock_lock_of_bgpio_chip(void)
{
    /* LDV_COMMENT_ASSERT Spin 'lock_of_bgpio_chip' must be unlocked */
    ldv_assert(ldv_spin_lock_of_bgpio_chip == 1);
    /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_bgpio_chip' */
    ldv_spin_lock_of_bgpio_chip = 2;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_lock_of_bgpio_chip') Check that spin 'lock_of_bgpio_chip' was locked and unlock it */
void ldv_spin_unlock_lock_of_bgpio_chip(void)
{
    /* LDV_COMMENT_ASSERT Spin 'lock_of_bgpio_chip' must be locked */
    ldv_assert(ldv_spin_lock_of_bgpio_chip == 2);
    /* LDV_COMMENT_CHANGE_STATE Unlock spin 'lock_of_bgpio_chip' */
    ldv_spin_lock_of_bgpio_chip = 1;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_lock_of_bgpio_chip') Check that spin 'lock_of_bgpio_chip' was not locked and nondeterministically lock it. Return 0 on fails */
int ldv_spin_trylock_lock_of_bgpio_chip(void)
{
    int held_by_other;

    /* LDV_COMMENT_ASSERT It may be an error if spin 'lock_of_bgpio_chip' is locked at this point */
    ldv_assert(ldv_spin_lock_of_bgpio_chip == 1);

    /* LDV_COMMENT_OTHER Construct nondetermined result */
    held_by_other = ldv_undef_int();

    /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'lock_of_bgpio_chip' */
    if (!held_by_other)
    {
        /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_bgpio_chip' */
        ldv_spin_lock_of_bgpio_chip = 2;
        /* LDV_COMMENT_RETURN Finish with success */
        return 1;
    }
    /* LDV_COMMENT_RETURN Spin 'lock_of_bgpio_chip' was not locked. Finish with fail */
    return 0;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_lock_of_bgpio_chip') The same process can not both lock spin 'lock_of_bgpio_chip' and wait until it will be unlocked */
void ldv_spin_unlock_wait_lock_of_bgpio_chip(void)
{
    /* LDV_COMMENT_ASSERT Spin 'lock_of_bgpio_chip' must not be locked by a current process */
    ldv_assert(ldv_spin_lock_of_bgpio_chip == 1);
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_lock_of_bgpio_chip') Check whether spin 'lock_of_bgpio_chip' was locked */
int ldv_spin_is_locked_lock_of_bgpio_chip(void)
{
    int held_by_other;

    /* LDV_COMMENT_OTHER Construct nondetermined result */
    held_by_other = ldv_undef_int();

    /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'lock_of_bgpio_chip' was locked */
    if (ldv_spin_lock_of_bgpio_chip != 1 || held_by_other)
    {
        /* LDV_COMMENT_RETURN Spin 'lock_of_bgpio_chip' was locked */
        return 1;
    }
    /* LDV_COMMENT_RETURN Spin 'lock_of_bgpio_chip' was unlocked */
    return 0;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_lock_of_bgpio_chip') Check whether spin 'lock_of_bgpio_chip' can be locked */
int ldv_spin_can_lock_lock_of_bgpio_chip(void)
{
    /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
    return ldv_spin_is_locked_lock_of_bgpio_chip() ? 0 : 1;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_lock_of_bgpio_chip') Check whether spin 'lock_of_bgpio_chip' is contended */
int ldv_spin_is_contended_lock_of_bgpio_chip(void)
{
    /* LDV_COMMENT_OTHER Construct nondetermined result */
    /* LDV_COMMENT_RETURN Nondeterministically report spin 'lock_of_bgpio_chip' as contended (0) or not (1) */
    return ldv_undef_int() ? 0 : 1;
}

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_lock_of_bgpio_chip') Lock spin 'lock_of_bgpio_chip' if atomic decrement result is zero */
int ldv_atomic_dec_and_lock_lock_of_bgpio_chip(void)
{
    int value_after_dec;

    /* LDV_COMMENT_ASSERT Spin 'lock_of_bgpio_chip' must be unlocked (since we may lock it in this function) */
    ldv_assert(ldv_spin_lock_of_bgpio_chip == 1);

    /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
    value_after_dec = ldv_undef_int();

    /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
    if (value_after_dec == 0)
    {
        /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_bgpio_chip', as atomic has decremented to zero */
        ldv_spin_lock_of_bgpio_chip = 2;
        /* LDV_COMMENT_RETURN Return 1 with locked spin 'lock_of_bgpio_chip' */
        return 1;
    }

    /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'lock_of_bgpio_chip' */
    return 0;
}
509 static int ldv_spin_node_size_lock_of_pglist_data = 1;
510
511 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_node_size_lock_of_pglist_data') Check that spin 'node_size_lock_of_pglist_data' was not locked and lock it */
512 void ldv_spin_lock_node_size_lock_of_pglist_data(void)
513 {
514 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be unlocked */
515 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
516 /* LDV_COMMENT_CHANGE_STATE Lock spin 'node_size_lock_of_pglist_data' */
517 ldv_spin_node_size_lock_of_pglist_data = 2;
518 }
519
/* Model of spin_unlock(): releasing is legal only while the tracked state is 2
   (locked); unlocking a free lock drives the verifier to the error label. */
520 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_node_size_lock_of_pglist_data') Check that spin 'node_size_lock_of_pglist_data' was locked and unlock it */
521 void ldv_spin_unlock_node_size_lock_of_pglist_data(void)
522 {
523 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be locked */
524 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 2);
525 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'node_size_lock_of_pglist_data' */
526 ldv_spin_node_size_lock_of_pglist_data = 1;
527 }
528
529 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_node_size_lock_of_pglist_data') Check that spin 'node_size_lock_of_pglist_data' was not locked and nondeterministically lock it. Return 0 on fails */
530 int ldv_spin_trylock_node_size_lock_of_pglist_data(void)
531 {
532 int is_spin_held_by_another_thread;
533
534 /* LDV_COMMENT_ASSERT It may be an error if spin 'node_size_lock_of_pglist_data' is locked at this point */
535 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
536
537 /* LDV_COMMENT_OTHER Construct nondetermined result */
538 is_spin_held_by_another_thread = ldv_undef_int();
539
540 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'node_size_lock_of_pglist_data' */
541 if (is_spin_held_by_another_thread)
542 {
543 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' was not locked. Finish with fail */
544 return 0;
545 }
546 else
547 {
548 /* LDV_COMMENT_CHANGE_STATE Lock spin 'node_size_lock_of_pglist_data' */
549 ldv_spin_node_size_lock_of_pglist_data = 2;
550 /* LDV_COMMENT_RETURN Finish with success */
551 return 1;
552 }
553 }
554
/* Model of spin_unlock_wait(): waiting on a lock we hold ourselves would
   self-deadlock, so the current process must not be the holder here. */
555 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_node_size_lock_of_pglist_data') The same process can not both lock spin 'node_size_lock_of_pglist_data' and wait until it will be unlocked */
556 void ldv_spin_unlock_wait_node_size_lock_of_pglist_data(void)
557 {
558 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must not be locked by a current process */
559 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
560 }
561
562 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_node_size_lock_of_pglist_data') Check whether spin 'node_size_lock_of_pglist_data' was locked */
563 int ldv_spin_is_locked_node_size_lock_of_pglist_data(void)
564 {
565 int is_spin_held_by_another_thread;
566
567 /* LDV_COMMENT_OTHER Construct nondetermined result */
568 is_spin_held_by_another_thread = ldv_undef_int();
569
570 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'node_size_lock_of_pglist_data' was locked */
571 if(ldv_spin_node_size_lock_of_pglist_data == 1 && !is_spin_held_by_another_thread)
572 {
573 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' was unlocked */
574 return 0;
575 }
576 else
577 {
578 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' was locked */
579 return 1;
580 }
581 }
582
/* Model of spin_can_lock(): defined simply as the negation of is_locked. */
583 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_node_size_lock_of_pglist_data') Check whether spin 'node_size_lock_of_pglist_data' was locked */
584 int ldv_spin_can_lock_node_size_lock_of_pglist_data(void)
585 {
586 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
587 return !ldv_spin_is_locked_node_size_lock_of_pglist_data();
588 }
589
/* Model of spin_is_contended(): the answer is fully nondeterministic.
   NOTE(review): the return mapping looks inverted relative to the real
   spin_is_contended() (which returns nonzero when contended), but since
   is_spin_contended is unconstrained both outcomes remain reachable, so
   verification results are unaffected — confirm against the LDV model spec. */
590 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_node_size_lock_of_pglist_data') Check whether spin 'node_size_lock_of_pglist_data' is contended */
591 int ldv_spin_is_contended_node_size_lock_of_pglist_data(void)
592 {
593 int is_spin_contended;
594
595 /* LDV_COMMENT_OTHER Construct nondetermined result */
596 is_spin_contended = ldv_undef_int();
597
598 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'node_size_lock_of_pglist_data' is contended */
599 if(is_spin_contended)
600 {
601 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' is contended */
602 return 0;
603 }
604 else
605 {
606 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' isn't contended */
607 return 1;
608 }
609 }
610
611 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data') Lock spin 'node_size_lock_of_pglist_data' if atomic decrement result is zero */
612 int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data(void)
613 {
614 int atomic_value_after_dec;
615
616 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be unlocked (since we may lock it in this function) */
617 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
618
619 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
620 atomic_value_after_dec = ldv_undef_int();
621
622 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
623 if (atomic_value_after_dec == 0)
624 {
625 /* LDV_COMMENT_CHANGE_STATE Lock spin 'node_size_lock_of_pglist_data', as atomic has decremented to zero */
626 ldv_spin_node_size_lock_of_pglist_data = 2;
627 /* LDV_COMMENT_RETURN Return 1 with locked spin 'node_size_lock_of_pglist_data' */
628 return 1;
629 }
630
631 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'node_size_lock_of_pglist_data' */
632 return 0;
633 }
/* Model state of spin 'siglock_of_sighand_struct': 1 = unlocked, 2 = locked. */
634 static int ldv_spin_siglock_of_sighand_struct = 1;
635
/* Model of spin_lock(): acquiring is legal only while the tracked state is 1
   (unlocked); a double lock drives the verifier to the error label. */
636 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_siglock_of_sighand_struct') Check that spin 'siglock_of_sighand_struct' was not locked and lock it */
637 void ldv_spin_lock_siglock_of_sighand_struct(void)
638 {
639 /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must be unlocked */
640 ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
641 /* LDV_COMMENT_CHANGE_STATE Lock spin 'siglock_of_sighand_struct' */
642 ldv_spin_siglock_of_sighand_struct = 2;
643 }
644
/* Model of spin_unlock(): releasing is legal only while the tracked state is 2
   (locked); unlocking a free lock drives the verifier to the error label. */
645 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_siglock_of_sighand_struct') Check that spin 'siglock_of_sighand_struct' was locked and unlock it */
646 void ldv_spin_unlock_siglock_of_sighand_struct(void)
647 {
648 /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must be locked */
649 ldv_assert(ldv_spin_siglock_of_sighand_struct == 2);
650 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'siglock_of_sighand_struct' */
651 ldv_spin_siglock_of_sighand_struct = 1;
652 }
653
654 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_siglock_of_sighand_struct') Check that spin 'siglock_of_sighand_struct' was not locked and nondeterministically lock it. Return 0 on fails */
655 int ldv_spin_trylock_siglock_of_sighand_struct(void)
656 {
657 int is_spin_held_by_another_thread;
658
659 /* LDV_COMMENT_ASSERT It may be an error if spin 'siglock_of_sighand_struct' is locked at this point */
660 ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
661
662 /* LDV_COMMENT_OTHER Construct nondetermined result */
663 is_spin_held_by_another_thread = ldv_undef_int();
664
665 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'siglock_of_sighand_struct' */
666 if (is_spin_held_by_another_thread)
667 {
668 /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' was not locked. Finish with fail */
669 return 0;
670 }
671 else
672 {
673 /* LDV_COMMENT_CHANGE_STATE Lock spin 'siglock_of_sighand_struct' */
674 ldv_spin_siglock_of_sighand_struct = 2;
675 /* LDV_COMMENT_RETURN Finish with success */
676 return 1;
677 }
678 }
679
/* Model of spin_unlock_wait(): waiting on a lock we hold ourselves would
   self-deadlock, so the current process must not be the holder here. */
680 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_siglock_of_sighand_struct') The same process can not both lock spin 'siglock_of_sighand_struct' and wait until it will be unlocked */
681 void ldv_spin_unlock_wait_siglock_of_sighand_struct(void)
682 {
683 /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must not be locked by a current process */
684 ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
685 }
686
687 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_siglock_of_sighand_struct') Check whether spin 'siglock_of_sighand_struct' was locked */
688 int ldv_spin_is_locked_siglock_of_sighand_struct(void)
689 {
690 int is_spin_held_by_another_thread;
691
692 /* LDV_COMMENT_OTHER Construct nondetermined result */
693 is_spin_held_by_another_thread = ldv_undef_int();
694
695 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'siglock_of_sighand_struct' was locked */
696 if(ldv_spin_siglock_of_sighand_struct == 1 && !is_spin_held_by_another_thread)
697 {
698 /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' was unlocked */
699 return 0;
700 }
701 else
702 {
703 /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' was locked */
704 return 1;
705 }
706 }
707
/* Model of spin_can_lock(): defined simply as the negation of is_locked. */
708 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_siglock_of_sighand_struct') Check whether spin 'siglock_of_sighand_struct' was locked */
709 int ldv_spin_can_lock_siglock_of_sighand_struct(void)
710 {
711 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
712 return !ldv_spin_is_locked_siglock_of_sighand_struct();
713 }
714
/* Model of spin_is_contended(): the answer is fully nondeterministic.
   NOTE(review): the return mapping looks inverted relative to the real
   spin_is_contended() (which returns nonzero when contended), but since
   is_spin_contended is unconstrained both outcomes remain reachable, so
   verification results are unaffected — confirm against the LDV model spec. */
715 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_siglock_of_sighand_struct') Check whether spin 'siglock_of_sighand_struct' is contended */
716 int ldv_spin_is_contended_siglock_of_sighand_struct(void)
717 {
718 int is_spin_contended;
719
720 /* LDV_COMMENT_OTHER Construct nondetermined result */
721 is_spin_contended = ldv_undef_int();
722
723 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'siglock_of_sighand_struct' is contended */
724 if(is_spin_contended)
725 {
726 /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' is contended */
727 return 0;
728 }
729 else
730 {
731 /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' isn't contended */
732 return 1;
733 }
734 }
735
736 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_siglock_of_sighand_struct') Lock spin 'siglock_of_sighand_struct' if atomic decrement result is zero */
737 int ldv_atomic_dec_and_lock_siglock_of_sighand_struct(void)
738 {
739 int atomic_value_after_dec;
740
741 /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must be unlocked (since we may lock it in this function) */
742 ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
743
744 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
745 atomic_value_after_dec = ldv_undef_int();
746
747 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
748 if (atomic_value_after_dec == 0)
749 {
750 /* LDV_COMMENT_CHANGE_STATE Lock spin 'siglock_of_sighand_struct', as atomic has decremented to zero */
751 ldv_spin_siglock_of_sighand_struct = 2;
752 /* LDV_COMMENT_RETURN Return 1 with locked spin 'siglock_of_sighand_struct' */
753 return 1;
754 }
755
756 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'siglock_of_sighand_struct' */
757 return 0;
758 }
759
/* Called once at the end of the modelled driver run: every tracked spinlock
   must be back in the unlocked (== 1) state, otherwise a lock leak is reported
   through the error label. */
760 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all spins are unlocked at the end */
761 void ldv_check_final_state(void)
762 {
763 /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be unlocked at the end */
764 ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);
765 /* LDV_COMMENT_ASSERT Spin 'lock' must be unlocked at the end */
766 ldv_assert(ldv_spin_lock == 1);
767 /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be unlocked at the end */
768 ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
769 /* LDV_COMMENT_ASSERT Spin 'lock_of_bgpio_chip' must be unlocked at the end */
770 ldv_assert(ldv_spin_lock_of_bgpio_chip == 1);
771 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be unlocked at the end */
772 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
773 /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must be unlocked at the end */
774 ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
775 } 1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because of some static verifiers (like
9 BLAST) don't accept multiple error labels through a program. */
/* Error sink: the self-looping LDV_ERROR label is the single program point
   the static verifier treats as "assertion violated". */
10 static inline void ldv_error(void)
11 {
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop, that causes verifiers to skip such paths. */
/* Path cutter: the self-loop makes verifiers abandon paths where an
   ldv_assume() condition does not hold. */
20 static inline void ldv_stop(void) {
21 LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return nondeterministic negative integer number. */
/* Returns a nondeterministic int constrained by ldv_assume() to be < 0. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 int ret = ldv_undef_int();
33
34 ldv_assume(ret < 0);
35
36 return ret;
37 }
38 /* Return nondeterministic nonpositive integer number. */
/* Returns a nondeterministic int constrained by ldv_assume() to be <= 0. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 int ret = ldv_undef_int();
42
43 ldv_assume(ret <= 0);
44
45 return ret;
46 }
47
48 /* Add explicit model for __builin_expect GCC function. Without the model a
49 return value will be treated as nondetermined by verifiers. */
/* Model of GCC's __builtin_expect: the branch-prediction hint 'c' is ignored
   and 'exp' is returned unchanged, so verifiers see a deterministic value
   instead of an unknown builtin. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
/* Model of __builtin_trap: abnormal termination is mapped onto the error
   label via ldv_assert(0), making traps visible to the verifier. */
60 static inline void __builtin_trap(void)
61 {
62 ldv_assert(0);
63 }
64
65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */ 1 #ifndef _LINUX_IRQ_H
2 #define _LINUX_IRQ_H
3
4 /*
5 * Please do not include this file in generic code. There is currently
6 * no requirement for any architecture to implement anything held
7 * within this file.
8 *
9 * Thanks. --rmk
10 */
11
12 #include <linux/smp.h>
13 #include <linux/linkage.h>
14 #include <linux/cache.h>
15 #include <linux/spinlock.h>
16 #include <linux/cpumask.h>
17 #include <linux/gfp.h>
18 #include <linux/irqhandler.h>
19 #include <linux/irqreturn.h>
20 #include <linux/irqnr.h>
21 #include <linux/errno.h>
22 #include <linux/topology.h>
23 #include <linux/wait.h>
24 #include <linux/io.h>
25
26 #include <asm/irq.h>
27 #include <asm/ptrace.h>
28 #include <asm/irq_regs.h>
29
30 struct seq_file;
31 struct module;
32 struct msi_msg;
33 enum irqchip_irq_state;
34
35 /*
36 * IRQ line status.
37 *
38 * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h
39 *
40 * IRQ_TYPE_NONE - default, unspecified type
41 * IRQ_TYPE_EDGE_RISING - rising edge triggered
42 * IRQ_TYPE_EDGE_FALLING - falling edge triggered
43 * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered
44 * IRQ_TYPE_LEVEL_HIGH - high level triggered
45 * IRQ_TYPE_LEVEL_LOW - low level triggered
46 * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits
47 * IRQ_TYPE_SENSE_MASK - Mask for all the above bits
48 * IRQ_TYPE_DEFAULT - For use by some PICs to ask irq_set_type
49 * to setup the HW to a sane default (used
50 * by irqdomain map() callbacks to synchronize
51 * the HW state and SW flags for a newly
52 * allocated descriptor).
53 *
54 * IRQ_TYPE_PROBE - Special flag for probing in progress
55 *
56 * Bits which can be modified via irq_set/clear/modify_status_flags()
57 * IRQ_LEVEL - Interrupt is level type. Will be also
58 * updated in the code when the above trigger
59 * bits are modified via irq_set_irq_type()
60 * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect
61 * it from affinity setting
62 * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing
63 * IRQ_NOREQUEST - Interrupt cannot be requested via
64 * request_irq()
65 * IRQ_NOTHREAD - Interrupt cannot be threaded
66 * IRQ_NOAUTOEN - Interrupt is not automatically enabled in
67 * request/setup_irq()
68 * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
69 * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
70 * IRQ_NESTED_THREAD - Interrupt nests into another thread
71 * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
72 * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
73 * it from the spurious interrupt detection
74 * mechanism and from core side polling.
75 */
76 enum {
77 IRQ_TYPE_NONE = 0x00000000,
78 IRQ_TYPE_EDGE_RISING = 0x00000001,
79 IRQ_TYPE_EDGE_FALLING = 0x00000002,
80 IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
81 IRQ_TYPE_LEVEL_HIGH = 0x00000004,
82 IRQ_TYPE_LEVEL_LOW = 0x00000008,
83 IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
84 IRQ_TYPE_SENSE_MASK = 0x0000000f,
85 IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK,
86
87 IRQ_TYPE_PROBE = 0x00000010,
88
89 IRQ_LEVEL = (1 << 8),
90 IRQ_PER_CPU = (1 << 9),
91 IRQ_NOPROBE = (1 << 10),
92 IRQ_NOREQUEST = (1 << 11),
93 IRQ_NOAUTOEN = (1 << 12),
94 IRQ_NO_BALANCING = (1 << 13),
95 IRQ_MOVE_PCNTXT = (1 << 14),
96 IRQ_NESTED_THREAD = (1 << 15),
97 IRQ_NOTHREAD = (1 << 16),
98 IRQ_PER_CPU_DEVID = (1 << 17),
99 IRQ_IS_POLLED = (1 << 18),
100 };
101
102 #define IRQF_MODIFY_MASK \
103 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
104 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
105 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
106 IRQ_IS_POLLED)
107
108 #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
109
110 /*
111 * Return value for chip->irq_set_affinity()
112 *
113 * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity
114 * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity
115 * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
116 * support stacked irqchips, which indicates skipping
117 * all descendent irqchips.
118 */
119 enum {
120 IRQ_SET_MASK_OK = 0,
121 IRQ_SET_MASK_OK_NOCOPY,
122 IRQ_SET_MASK_OK_DONE,
123 };
124
125 struct msi_desc;
126 struct irq_domain;
127
128 /**
129 * struct irq_common_data - per irq data shared by all irqchips
130 * @state_use_accessors: status information for irq chip functions.
131 * Use accessor functions to deal with it
132 */
133 struct irq_common_data {
134 unsigned int state_use_accessors;
135 };
136
137 /**
138 * struct irq_data - per irq chip data passed down to chip functions
139 * @mask: precomputed bitmask for accessing the chip registers
140 * @irq: interrupt number
141 * @hwirq: hardware interrupt number, local to the interrupt domain
142 * @node: node index useful for balancing
143 * @common: point to data shared by all irqchips
144 * @chip: low level interrupt hardware access
145 * @domain: Interrupt translation domain; responsible for mapping
146 * between hwirq number and linux irq number.
147 * @parent_data: pointer to parent struct irq_data to support hierarchy
148 * irq_domain
149 * @handler_data: per-IRQ data for the irq_chip methods
150 * @chip_data: platform-specific per-chip private data for the chip
151 * methods, to allow shared chip implementations
152 * @msi_desc: MSI descriptor
153 * @affinity: IRQ affinity on SMP
154 *
155 * The fields here need to overlay the ones in irq_desc until we
156 * cleaned up the direct references and switched everything over to
157 * irq_data.
158 */
159 struct irq_data {
160 u32 mask;
161 unsigned int irq;
162 unsigned long hwirq;
163 unsigned int node;
164 struct irq_common_data *common;
165 struct irq_chip *chip;
166 struct irq_domain *domain;
167 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
168 struct irq_data *parent_data;
169 #endif
170 void *handler_data;
171 void *chip_data;
172 struct msi_desc *msi_desc;
173 cpumask_var_t affinity;
174 };
175
176 /*
177 * Bit masks for irq_common_data.state_use_accessors
178 *
179 * IRQD_TRIGGER_MASK - Mask for the trigger type bits
180 * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
181 * IRQD_NO_BALANCING - Balancing disabled for this IRQ
182 * IRQD_PER_CPU - Interrupt is per cpu
183 * IRQD_AFFINITY_SET - Interrupt affinity was set
184 * IRQD_LEVEL - Interrupt is level triggered
185 * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup
186 * from suspend
187 * IRQD_MOVE_PCNTXT - Interrupt can be moved in process
188 * context
189 * IRQD_IRQ_DISABLED - Disabled state of the interrupt
190 * IRQD_IRQ_MASKED - Masked state of the interrupt
191 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt
192 * IRQD_WAKEUP_ARMED - Wakeup mode armed
193 */
194 enum {
195 IRQD_TRIGGER_MASK = 0xf,
196 IRQD_SETAFFINITY_PENDING = (1 << 8),
197 IRQD_NO_BALANCING = (1 << 10),
198 IRQD_PER_CPU = (1 << 11),
199 IRQD_AFFINITY_SET = (1 << 12),
200 IRQD_LEVEL = (1 << 13),
201 IRQD_WAKEUP_STATE = (1 << 14),
202 IRQD_MOVE_PCNTXT = (1 << 15),
203 IRQD_IRQ_DISABLED = (1 << 16),
204 IRQD_IRQ_MASKED = (1 << 17),
205 IRQD_IRQ_INPROGRESS = (1 << 18),
206 IRQD_WAKEUP_ARMED = (1 << 19),
207 };
208
209 #define __irqd_to_state(d) ((d)->common->state_use_accessors)
210
/* True if an affinity change is queued for this IRQ (IRQD_SETAFFINITY_PENDING). */
211 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
212 {
213 return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
214 }
215
/* True if the interrupt is marked per-CPU (IRQD_PER_CPU). */
216 static inline bool irqd_is_per_cpu(struct irq_data *d)
217 {
218 return __irqd_to_state(d) & IRQD_PER_CPU;
219 }
220
/* True if affinity balancing is allowed: neither per-CPU nor balancing-disabled. */
221 static inline bool irqd_can_balance(struct irq_data *d)
222 {
223 return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
224 }
225
/* True if the IRQ affinity has been explicitly set (IRQD_AFFINITY_SET). */
226 static inline bool irqd_affinity_was_set(struct irq_data *d)
227 {
228 return __irqd_to_state(d) & IRQD_AFFINITY_SET;
229 }
230
/* Record that the IRQ affinity was explicitly set. */
231 static inline void irqd_mark_affinity_was_set(struct irq_data *d)
232 {
233 __irqd_to_state(d) |= IRQD_AFFINITY_SET;
234 }
235
/* Extract the IRQ_TYPE_* trigger bits from the irq state. */
236 static inline u32 irqd_get_trigger_type(struct irq_data *d)
237 {
238 return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
239 }
240
241 /*
242 * Must only be called inside irq_chip.irq_set_type() functions.
243 */
/* Replace the trigger-type bits; only valid inside irq_chip.irq_set_type()
   (see the comment above in the original header). */
244 static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
245 {
246 __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
247 __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
248 }
249
/* True if the interrupt is level triggered (IRQD_LEVEL). */
250 static inline bool irqd_is_level_type(struct irq_data *d)
251 {
252 return __irqd_to_state(d) & IRQD_LEVEL;
253 }
254
/* True if the IRQ is configured as a wakeup source (IRQD_WAKEUP_STATE). */
255 static inline bool irqd_is_wakeup_set(struct irq_data *d)
256 {
257 return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
258 }
259
/* True if the IRQ may be migrated from process context (IRQD_MOVE_PCNTXT). */
260 static inline bool irqd_can_move_in_process_context(struct irq_data *d)
261 {
262 return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
263 }
264
/* True if the interrupt is currently disabled (IRQD_IRQ_DISABLED). */
265 static inline bool irqd_irq_disabled(struct irq_data *d)
266 {
267 return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
268 }
269
/* True if the interrupt is currently masked (IRQD_IRQ_MASKED). */
270 static inline bool irqd_irq_masked(struct irq_data *d)
271 {
272 return __irqd_to_state(d) & IRQD_IRQ_MASKED;
273 }
274
/* True if the interrupt is currently being handled (IRQD_IRQ_INPROGRESS). */
275 static inline bool irqd_irq_inprogress(struct irq_data *d)
276 {
277 return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS;
278 }
279
/* True if wakeup mode is armed for this IRQ (IRQD_WAKEUP_ARMED). */
280 static inline bool irqd_is_wakeup_armed(struct irq_data *d)
281 {
282 return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
283 }
284
285
286 /*
287 * Functions for chained handlers which can be enabled/disabled by the
288 * standard disable_irq/enable_irq calls. Must be called with
289 * irq_desc->lock held.
290 */
/* Mark a chained handler as in progress; caller must hold irq_desc->lock
   (per the header comment above). */
291 static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
292 {
293 __irqd_to_state(d) |= IRQD_IRQ_INPROGRESS;
294 }
295
/* Clear the in-progress mark for a chained handler; caller must hold
   irq_desc->lock (per the header comment above). */
296 static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
297 {
298 __irqd_to_state(d) &= ~IRQD_IRQ_INPROGRESS;
299 }
300
/* Return the hardware interrupt number (domain-local) for this irq_data. */
301 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
302 {
303 return d->hwirq;
304 }
305
306 /**
307 * struct irq_chip - hardware interrupt chip descriptor
308 *
309 * @name: name for /proc/interrupts
310 * @irq_startup: start up the interrupt (defaults to ->enable if NULL)
311 * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
312 * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL)
313 * @irq_disable: disable the interrupt
314 * @irq_ack: start of a new interrupt
315 * @irq_mask: mask an interrupt source
316 * @irq_mask_ack: ack and mask an interrupt source
317 * @irq_unmask: unmask an interrupt source
318 * @irq_eoi: end of interrupt
319 * @irq_set_affinity: set the CPU affinity on SMP machines
320 * @irq_retrigger: resend an IRQ to the CPU
321 * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
322 * @irq_set_wake: enable/disable power-management wake-on of an IRQ
323 * @irq_bus_lock: function to lock access to slow bus (i2c) chips
324 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
325 * @irq_cpu_online: configure an interrupt source for a secondary CPU
326 * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU
327 * @irq_suspend: function called from core code on suspend once per chip
328 * @irq_resume: function called from core code on resume once per chip
329 * @irq_pm_shutdown: function called from core code on shutdown once per chip
330 * @irq_calc_mask: Optional function to set irq_data.mask for special cases
331 * @irq_print_chip: optional to print special chip info in show_interrupts
332 * @irq_request_resources: optional to request resources before calling
333 * any other callback related to this irq
334 * @irq_release_resources: optional to release resources acquired with
335 * irq_request_resources
336 * @irq_compose_msi_msg: optional to compose message content for MSI
337 * @irq_write_msi_msg: optional to write message content for MSI
338 * @irq_get_irqchip_state: return the internal state of an interrupt
339 * @irq_set_irqchip_state: set the internal state of a interrupt
340 * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine
341 * @flags: chip specific flags
342 */
343 struct irq_chip {
344 const char *name;
345 unsigned int (*irq_startup)(struct irq_data *data);
346 void (*irq_shutdown)(struct irq_data *data);
347 void (*irq_enable)(struct irq_data *data);
348 void (*irq_disable)(struct irq_data *data);
349
350 void (*irq_ack)(struct irq_data *data);
351 void (*irq_mask)(struct irq_data *data);
352 void (*irq_mask_ack)(struct irq_data *data);
353 void (*irq_unmask)(struct irq_data *data);
354 void (*irq_eoi)(struct irq_data *data);
355
356 int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
357 int (*irq_retrigger)(struct irq_data *data);
358 int (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
359 int (*irq_set_wake)(struct irq_data *data, unsigned int on);
360
361 void (*irq_bus_lock)(struct irq_data *data);
362 void (*irq_bus_sync_unlock)(struct irq_data *data);
363
364 void (*irq_cpu_online)(struct irq_data *data);
365 void (*irq_cpu_offline)(struct irq_data *data);
366
367 void (*irq_suspend)(struct irq_data *data);
368 void (*irq_resume)(struct irq_data *data);
369 void (*irq_pm_shutdown)(struct irq_data *data);
370
371 void (*irq_calc_mask)(struct irq_data *data);
372
373 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
374 int (*irq_request_resources)(struct irq_data *data);
375 void (*irq_release_resources)(struct irq_data *data);
376
377 void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
378 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
379
380 int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
381 int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);
382
383 int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);
384
385 unsigned long flags;
386 };
387
388 /*
389 * irq_chip specific flags
390 *
391 * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type()
392 * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled
393 * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
394 * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
395 * when irq enabled
396 * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
397 * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
398 * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
399 */
400 enum {
401 IRQCHIP_SET_TYPE_MASKED = (1 << 0),
402 IRQCHIP_EOI_IF_HANDLED = (1 << 1),
403 IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
404 IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
405 IRQCHIP_SKIP_SET_WAKE = (1 << 4),
406 IRQCHIP_ONESHOT_SAFE = (1 << 5),
407 IRQCHIP_EOI_THREADED = (1 << 6),
408 };
409
410 #include <linux/irqdesc.h>
411
412 /*
413 * Pick up the arch-dependent methods:
414 */
415 #include <asm/hw_irq.h>
416
417 #ifndef NR_IRQS_LEGACY
418 # define NR_IRQS_LEGACY 0
419 #endif
420
421 #ifndef ARCH_IRQ_INIT_FLAGS
422 # define ARCH_IRQ_INIT_FLAGS 0
423 #endif
424
425 #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
426
427 struct irqaction;
428 extern int setup_irq(unsigned int irq, struct irqaction *new);
429 extern void remove_irq(unsigned int irq, struct irqaction *act);
430 extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
431 extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
432
433 extern void irq_cpu_online(void);
434 extern void irq_cpu_offline(void);
435 extern int irq_set_affinity_locked(struct irq_data *data,
436 const struct cpumask *cpumask, bool force);
437 extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
438
439 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
440 void irq_move_irq(struct irq_data *data);
441 void irq_move_masked_irq(struct irq_data *data);
442 #else
/* No-op stubs used when GENERIC_PENDING_IRQ migration support is compiled out. */
443 static inline void irq_move_irq(struct irq_data *data) { }
444 static inline void irq_move_masked_irq(struct irq_data *data) { }
445 #endif
446
447 extern int no_irq_affinity;
448
449 #ifdef CONFIG_HARDIRQS_SW_RESEND
450 int irq_set_parent(int irq, int parent_irq);
451 #else
/* Stub used without CONFIG_HARDIRQS_SW_RESEND: always reports success. */
452 static inline int irq_set_parent(int irq, int parent_irq)
453 {
454 return 0;
455 }
456 #endif
457
458 /*
459 * Built-in IRQ handlers for various IRQ types,
460 * callable via desc->handle_irq()
461 */
462 extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
463 extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
464 extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
465 extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
466 extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
467 extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
468 extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
469 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
470 extern void handle_nested_irq(unsigned int irq);
471
472 extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
473 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
474 extern void irq_chip_enable_parent(struct irq_data *data);
475 extern void irq_chip_disable_parent(struct irq_data *data);
476 extern void irq_chip_ack_parent(struct irq_data *data);
477 extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
478 extern void irq_chip_mask_parent(struct irq_data *data);
479 extern void irq_chip_unmask_parent(struct irq_data *data);
480 extern void irq_chip_eoi_parent(struct irq_data *data);
481 extern int irq_chip_set_affinity_parent(struct irq_data *data,
482 const struct cpumask *dest,
483 bool force);
484 extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
485 extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
486 void *vcpu_info);
487 #endif
488
489 /* Handling of unhandled and spurious interrupts: */
490 extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
491 irqreturn_t action_ret);
492
493
494 /* Enable/disable irq debugging output: */
495 extern int noirqdebug_setup(char *str);
496
497 /* Checks whether the interrupt can be requested by request_irq(): */
498 extern int can_request_irq(unsigned int irq, unsigned long irqflags);
499
500 /* Dummy irq-chip implementations: */
501 extern struct irq_chip no_irq_chip;
502 extern struct irq_chip dummy_irq_chip;
503
504 extern void
505 irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
506 irq_flow_handler_t handle, const char *name);
507
/*
 * Convenience wrapper: irq_set_chip_and_handler_name() without a
 * symbolic handler name (name passed as NULL).
 */
508 static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
509 irq_flow_handler_t handle)
510 {
511 irq_set_chip_and_handler_name(irq, chip, handle, NULL);
512 }
513
514 extern int irq_set_percpu_devid(unsigned int irq);
515
/* Core worker behind irq_set_handler()/irq_set_chained_handler() below. */
516 extern void
517 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
518 const char *name);
519
/* Install a non-chained flow handler for @irq (is_chained = 0, no name). */
520 static inline void
521 irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
522 {
523 __irq_set_handler(irq, handle, 0, NULL);
524 }
525
526 /*
527 * Set a highlevel chained flow handler for a given IRQ.
528 * (a chained handler is automatically enabled and set to
529 * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
 *
 * The handler data is left untouched; use
 * irq_set_chained_handler_and_data() to set both at once.
530 */
531 static inline void
532 irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
533 {
534 __irq_set_handler(irq, handle, 1, NULL);
535 }
536
537 /*
538 * Set a highlevel chained flow handler and its data for a given IRQ.
539 * (a chained handler is automatically enabled and set to
540 * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
541 */
542 void
543 irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
544 void *data);
545
/* Update irq status flags: clears @clr and sets @set (see helpers below). */
546 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
547
/* Set the given IRQ_* status flags on @irq. */
548 static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
549 {
550 irq_modify_status(irq, 0, set);
551 }
552
/* Clear the given IRQ_* status flags on @irq. */
553 static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
554 {
555 irq_modify_status(irq, clr, 0);
556 }
557
/* Mark @irq with IRQ_NOPROBE (excluded from autoprobing). */
558 static inline void irq_set_noprobe(unsigned int irq)
559 {
560 irq_modify_status(irq, 0, IRQ_NOPROBE);
561 }
562
/* Clear IRQ_NOPROBE on @irq (make it eligible for autoprobing again). */
563 static inline void irq_set_probe(unsigned int irq)
564 {
565 irq_modify_status(irq, IRQ_NOPROBE, 0);
566 }
567
/* Mark @irq with IRQ_NOTHREAD. */
568 static inline void irq_set_nothread(unsigned int irq)
569 {
570 irq_modify_status(irq, 0, IRQ_NOTHREAD);
571 }
572
/* Clear IRQ_NOTHREAD on @irq. */
573 static inline void irq_set_thread(unsigned int irq)
574 {
575 irq_modify_status(irq, IRQ_NOTHREAD, 0);
576 }
577
578 static inline void irq_set_nested_thread(unsigned int irq, bool nest)
579 {
580 if (nest)
581 irq_set_status_flags(irq, IRQ_NESTED_THREAD);
582 else
583 irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
584 }
585
/*
 * Mark @irq as a per-CPU, per-device interrupt: no auto-enable, no
 * autoprobing, no threading, per-CPU device id handling.
 */
586 static inline void irq_set_percpu_devid_flags(unsigned int irq)
587 {
588 irq_set_status_flags(irq,
589 IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
590 IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
591 }
592
593 /* Set/get chip/data for an IRQ: */
594 extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
595 extern int irq_set_handler_data(unsigned int irq, void *data);
596 extern int irq_set_chip_data(unsigned int irq, void *data);
597 extern int irq_set_irq_type(unsigned int irq, unsigned int type);
598 extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
599 extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
600 struct msi_desc *entry);
/* Core lookup; the NULL-safe getters below are built on top of it. */
601 extern struct irq_data *irq_get_irq_data(unsigned int irq);
602
603 static inline struct irq_chip *irq_get_chip(unsigned int irq)
604 {
605 struct irq_data *d = irq_get_irq_data(irq);
606 return d ? d->chip : NULL;
607 }
608
/* Chip accessor for callers that already hold the irq_data pointer. */
609 static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
610 {
611 return d->chip;
612 }
613
614 static inline void *irq_get_chip_data(unsigned int irq)
615 {
616 struct irq_data *d = irq_get_irq_data(irq);
617 return d ? d->chip_data : NULL;
618 }
619
/* Chip-private data accessor for callers that already hold the irq_data. */
620 static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
621 {
622 return d->chip_data;
623 }
624
625 static inline void *irq_get_handler_data(unsigned int irq)
626 {
627 struct irq_data *d = irq_get_irq_data(irq);
628 return d ? d->handler_data : NULL;
629 }
630
/* Handler-data accessor for callers that already hold the irq_data. */
631 static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
632 {
633 return d->handler_data;
634 }
635
636 static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
637 {
638 struct irq_data *d = irq_get_irq_data(irq);
639 return d ? d->msi_desc : NULL;
640 }
641
/* MSI descriptor accessor for callers that already hold the irq_data. */
642 static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
643 {
644 return d->msi_desc;
645 }
646
647 static inline u32 irq_get_trigger_type(unsigned int irq)
648 {
649 struct irq_data *d = irq_get_irq_data(irq);
650 return d ? irqd_get_trigger_type(d) : 0;
651 }
652
/* NUMA node the irq descriptor is associated with. */
653 static inline int irq_data_get_node(struct irq_data *d)
654 {
655 return d->node;
656 }
657
658 static inline struct cpumask *irq_get_affinity_mask(int irq)
659 {
660 struct irq_data *d = irq_get_irq_data(irq);
661
662 return d ? d->affinity : NULL;
663 }
664
/* Affinity mask accessor for callers that already hold the irq_data. */
665 static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
666 {
667 return d->affinity;
668 }
669
670 unsigned int arch_dynirq_lower_bound(unsigned int from);
671
/*
 * NOTE(review): irq == -1 appears to mean "allocate anywhere at/above
 * @from" (see the -1 wrappers below) — confirm against kernel/irq/irqdesc.c.
 */
672 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
673 struct module *owner);
674
675 /* use macros to avoid needing export.h for THIS_MODULE */
676 #define irq_alloc_descs(irq, from, cnt, node) \
677 __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE)
678
679 #define irq_alloc_desc(node) \
680 irq_alloc_descs(-1, 0, 1, node)
681
682 #define irq_alloc_desc_at(at, node) \
683 irq_alloc_descs(at, at, 1, node)
684
685 #define irq_alloc_desc_from(from, node) \
686 irq_alloc_descs(-1, from, 1, node)
687
688 #define irq_alloc_descs_from(from, cnt, node) \
689 irq_alloc_descs(-1, from, cnt, node)
690
/* Free @cnt consecutive irq descriptors starting at @irq. */
691 void irq_free_descs(unsigned int irq, unsigned int cnt);
/* Free a single irq descriptor. */
692 static inline void irq_free_desc(unsigned int irq)
693 {
694 irq_free_descs(irq, 1);
695 }
696
697 #ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
698 unsigned int irq_alloc_hwirqs(int cnt, int node);
/* Allocate a single hw interrupt on @node. */
699 static inline unsigned int irq_alloc_hwirq(int node)
700 {
701 return irq_alloc_hwirqs(1, node);
702 }
void irq_free_hwirqs(unsigned int from, int cnt);

/**
 * irq_free_hwirq - Free a single hw interrupt allocated with irq_alloc_hwirq()
 * @irq: The interrupt number to free
 */
static inline void irq_free_hwirq(unsigned int irq)
{
	/*
	 * Plain call, not "return irq_free_hwirqs(...)": returning a void
	 * expression from a void function violates C99/C11 6.8.6.4 and
	 * warns under -pedantic builds.
	 */
	irq_free_hwirqs(irq, 1);
}
/* Arch hooks for the legacy hwirq allocator above. */
708 int arch_setup_hwirq(unsigned int irq, int node);
709 void arch_teardown_hwirq(unsigned int irq);
710 #endif
711
712 #ifdef CONFIG_GENERIC_IRQ_LEGACY
713 void irq_init_desc(unsigned int irq);
714 #endif
715
716 /**
717 * struct irq_chip_regs - register offsets for struct irq_chip_generic
718 * @enable: Enable register offset to reg_base
719 * @disable: Disable register offset to reg_base
720 * @mask: Mask register offset to reg_base
721 * @ack: Ack register offset to reg_base
722 * @eoi: Eoi register offset to reg_base
723 * @type: Type configuration register offset to reg_base
724 * @polarity: Polarity configuration register offset to reg_base
725 */
726 struct irq_chip_regs {
727 unsigned long enable;
728 unsigned long disable;
729 unsigned long mask;
730 unsigned long ack;
731 unsigned long eoi;
732 unsigned long type;
733 unsigned long polarity;
734 };
735
736 /**
737 * struct irq_chip_type - Generic interrupt chip instance for a flow type
738 * @chip: The real interrupt chip which provides the callbacks
739 * @regs: Register offsets for this chip
740 * @handler: Flow handler associated with this chip
741 * @type: Chip can handle these flow types
742 * @mask_cache_priv: Cached mask register private to the chip type
743 * @mask_cache: Pointer to cached mask register
744 *
745 * An irq_chip_generic can have several instances of irq_chip_type when
746 * it requires different functions and register offsets for different
747 * flow types.
748 */
749 struct irq_chip_type {
750 struct irq_chip chip;
751 struct irq_chip_regs regs;
752 irq_flow_handler_t handler;
753 u32 type;
754 u32 mask_cache_priv;
755 u32 *mask_cache;
756 };
757
758 /**
759 * struct irq_chip_generic - Generic irq chip data structure
760 * @lock: Lock to protect register and cache data access
761 * @reg_base: Register base address (virtual)
762 * @reg_readl: Alternate I/O accessor (defaults to readl if NULL)
763 * @reg_writel: Alternate I/O accessor (defaults to writel if NULL)
764 * @irq_base: Interrupt base nr for this chip
765 * @irq_cnt: Number of interrupts handled by this chip
766 * @mask_cache: Cached mask register shared between all chip types
767 * @type_cache: Cached type register
768 * @polarity_cache: Cached polarity register
769 * @wake_enabled: Interrupt can wakeup from suspend
770 * @wake_active: Interrupt is marked as an wakeup from suspend source
771 * @num_ct: Number of available irq_chip_type instances (usually 1)
772 * @private: Private data for non generic chip callbacks
773 * @installed: bitfield to denote installed interrupts
774 * @unused: bitfield to denote unused interrupts
775 * @domain: irq domain pointer
776 * @list: List head for keeping track of instances
777 * @chip_types: Array of interrupt irq_chip_types
778 *
779 * Note, that irq_chip_generic can have multiple irq_chip_type
780 * implementations which can be associated to a particular irq line of
781 * an irq_chip_generic instance. That allows to share and protect
782 * state in an irq_chip_generic instance when we need to implement
783 * different flow mechanisms (level/edge) for it.
784 */
785 struct irq_chip_generic {
786 raw_spinlock_t lock;
787 void __iomem *reg_base;
788 u32 (*reg_readl)(void __iomem *addr);
789 void (*reg_writel)(u32 val, void __iomem *addr);
790 unsigned int irq_base;
791 unsigned int irq_cnt;
792 u32 mask_cache;
793 u32 type_cache;
794 u32 polarity_cache;
795 u32 wake_enabled;
796 u32 wake_active;
797 unsigned int num_ct;
798 void *private;
799 unsigned long installed;
800 unsigned long unused;
801 struct irq_domain *domain;
802 struct list_head list;
/* GNU zero-length trailing array; @num_ct entries are allocated. */
803 struct irq_chip_type chip_types[0];
804 };
805
806 /**
807 * enum irq_gc_flags - Initialization flags for generic irq chips
808 * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg
809 * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for
810 * irq chips which need to call irq_set_wake() on
811 * the parent irq. Usually GPIO implementations
812 * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private
813 * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask
814 * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE)
 *
 * Bitmask flags; combine with bitwise OR.
815 */
816 enum irq_gc_flags {
817 IRQ_GC_INIT_MASK_CACHE = 1 << 0,
818 IRQ_GC_INIT_NESTED_LOCK = 1 << 1,
819 IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2,
820 IRQ_GC_NO_MASK = 1 << 3,
821 IRQ_GC_BE_IO = 1 << 4,
822 };
823
824 /*
825 * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains
826 * @irqs_per_chip: Number of interrupts per chip
827 * @num_chips: Number of chips
828 * @irq_flags_to_clear: IRQ* flags to clear on irq setup
829 * @irq_flags_to_set: IRQ* flags to set on irq setup
830 * @gc_flags: Generic chip specific setup flags
831 * @gc: Array of pointers to generic interrupt chips
832 */
833 struct irq_domain_chip_generic {
834 unsigned int irqs_per_chip;
835 unsigned int num_chips;
836 unsigned int irq_flags_to_clear;
837 unsigned int irq_flags_to_set;
838 enum irq_gc_flags gc_flags;
/* GNU zero-length trailing array; @num_chips entries are allocated. */
839 struct irq_chip_generic *gc[0];
840 };
841
842 /* Generic chip callback functions */
843 void irq_gc_noop(struct irq_data *d);
844 void irq_gc_mask_disable_reg(struct irq_data *d);
845 void irq_gc_mask_set_bit(struct irq_data *d);
846 void irq_gc_mask_clr_bit(struct irq_data *d);
847 void irq_gc_unmask_enable_reg(struct irq_data *d);
848 void irq_gc_ack_set_bit(struct irq_data *d);
849 void irq_gc_ack_clr_bit(struct irq_data *d);
850 void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
851 void irq_gc_eoi(struct irq_data *d);
852 int irq_gc_set_wake(struct irq_data *d, unsigned int on);
853
854 /* Setup functions for irq_chip_generic */
855 int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
856 irq_hw_number_t hw_irq);
857 struct irq_chip_generic *
858 irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
859 void __iomem *reg_base, irq_flow_handler_t handler);
860 void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
861 enum irq_gc_flags flags, unsigned int clr,
862 unsigned int set);
863 int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
864 void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
865 unsigned int clr, unsigned int set);
866
/* irq-domain flavoured setup/lookup for generic chips. */
867 struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
868 int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
869 int num_ct, const char *name,
870 irq_flow_handler_t handler,
871 unsigned int clr, unsigned int set,
872 enum irq_gc_flags flags);
873
874
/*
 * Back-map from the embedded struct irq_chip to its containing
 * irq_chip_type. Only valid when d->chip really is embedded in an
 * irq_chip_type, i.e. the irq uses the generic chip infrastructure.
 */
875 static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
876 {
877 return container_of(d->chip, struct irq_chip_type, chip);
878 }
879
/* Bitmask with the low n bits set; saturates to all ones for n >= 32. */
880 #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
881
882 #ifdef CONFIG_SMP
/*
 * Serialize access to the generic chip's registers and cached state;
 * compiled out (empty bodies) on uniprocessor kernels.
 */
883 static inline void irq_gc_lock(struct irq_chip_generic *gc)
884 {
885 raw_spin_lock(&gc->lock);
886 }
887
888 static inline void irq_gc_unlock(struct irq_chip_generic *gc)
889 {
890 raw_spin_unlock(&gc->lock);
891 }
892 #else
893 static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
894 static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
895 #endif
896
897 static inline void irq_reg_writel(struct irq_chip_generic *gc,
898 u32 val, int reg_offset)
899 {
900 if (gc->reg_writel)
901 gc->reg_writel(val, gc->reg_base + reg_offset);
902 else
903 writel(val, gc->reg_base + reg_offset);
904 }
905
906 static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
907 int reg_offset)
908 {
909 if (gc->reg_readl)
910 return gc->reg_readl(gc->reg_base + reg_offset);
911 else
912 return readl(gc->reg_base + reg_offset);
913 }
914
915 #endif /* _LINUX_IRQ_H */ |
Here is an explanation of a rule violation that arose while your driver was checked against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error. Please analyze the given error trace and the related source code to determine whether there is an error in your driver.
The Error trace column contains a path on which the given rule is violated. You can expand/collapse entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu. You can also expand/collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking them opens the corresponding lines in the source code.
The Source code column contains the content of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files; hover over them to see their full file names, and click one to show that file's content.
Ядро | Модуль | Правило | Верификатор | Вердикт | Статус | Время создания | Описание проблемы |
linux-4.2-rc1.tar.xz | drivers/gpio/gpio-grgpio.ko | 39_7a | CPAchecker | Bug | Fixed | 2015-08-02 19:16:03 | L0202 |
Комментарий
reported: 2 Aug 2015
[В начало]