Bug

Error # 180

Error trace
__CPAchecker_initialize()
{
20 typedef unsigned char __u8;
23 typedef unsigned short __u16;
25 typedef int __s32;
26 typedef unsigned int __u32;
29 typedef long long __s64;
30 typedef unsigned long long __u64;
15 typedef signed char s8;
16 typedef unsigned char u8;
19 typedef unsigned short u16;
21 typedef int s32;
22 typedef unsigned int u32;
24 typedef long long s64;
25 typedef unsigned long long u64;
14 typedef long __kernel_long_t;
15 typedef unsigned long __kernel_ulong_t;
27 typedef int __kernel_pid_t;
48 typedef unsigned int __kernel_uid32_t;
49 typedef unsigned int __kernel_gid32_t;
71 typedef __kernel_ulong_t __kernel_size_t;
72 typedef __kernel_long_t __kernel_ssize_t;
87 typedef long long __kernel_loff_t;
88 typedef __kernel_long_t __kernel_time_t;
89 typedef __kernel_long_t __kernel_clock_t;
90 typedef int __kernel_timer_t;
91 typedef int __kernel_clockid_t;
257 struct kernel_symbol { unsigned long value; const char *name; } ;
33 struct module ;
12 typedef __u32 __kernel_dev_t;
15 typedef __kernel_dev_t dev_t;
18 typedef unsigned short umode_t;
21 typedef __kernel_pid_t pid_t;
26 typedef __kernel_clockid_t clockid_t;
29 typedef _Bool bool;
31 typedef __kernel_uid32_t uid_t;
32 typedef __kernel_gid32_t gid_t;
45 typedef __kernel_loff_t loff_t;
54 typedef __kernel_size_t size_t;
59 typedef __kernel_ssize_t ssize_t;
69 typedef __kernel_time_t time_t;
102 typedef __s32 int32_t;
108 typedef __u32 uint32_t;
133 typedef unsigned long sector_t;
134 typedef unsigned long blkcnt_t;
157 typedef unsigned int gfp_t;
158 typedef unsigned int fmode_t;
161 typedef u64 phys_addr_t;
166 typedef phys_addr_t resource_size_t;
176 struct __anonstruct_atomic_t_6 { int counter; } ;
176 typedef struct __anonstruct_atomic_t_6 atomic_t;
181 struct __anonstruct_atomic64_t_7 { long counter; } ;
181 typedef struct __anonstruct_atomic64_t_7 atomic64_t;
182 struct list_head { struct list_head *next; struct list_head *prev; } ;
187 struct hlist_node ;
187 struct hlist_head { struct hlist_node *first; } ;
191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ;
202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ;
125 typedef void (*ctor_fn_t)();
279 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ;
58 struct device ;
467 struct file_operations ;
479 struct completion ;
480 struct pt_regs ;
556 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ;
111 struct timespec ;
112 struct compat_timespec ;
113 struct __anonstruct_futex_25 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ;
113 struct __anonstruct_nanosleep_26 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ;
113 struct pollfd ;
113 struct __anonstruct_poll_27 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ;
113 union __anonunion____missing_field_name_24 { struct __anonstruct_futex_25 futex; struct __anonstruct_nanosleep_26 nanosleep; struct __anonstruct_poll_27 poll; } ;
113 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_24 __annonCompField4; } ;
39 struct page ;
26 struct task_struct ;
27 struct mm_struct ;
288 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ;
66 struct __anonstruct____missing_field_name_30 { unsigned int a; unsigned int b; } ;
66 struct __anonstruct____missing_field_name_31 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ;
66 union __anonunion____missing_field_name_29 { struct __anonstruct____missing_field_name_30 __annonCompField5; struct __anonstruct____missing_field_name_31 __annonCompField6; } ;
66 struct desc_struct { union __anonunion____missing_field_name_29 __annonCompField7; } ;
16 typedef unsigned long pgdval_t;
17 typedef unsigned long pgprotval_t;
21 struct pgprot { pgprotval_t pgprot; } ;
256 typedef struct pgprot pgprot_t;
258 struct __anonstruct_pgd_t_33 { pgdval_t pgd; } ;
258 typedef struct __anonstruct_pgd_t_33 pgd_t;
423 typedef struct page *pgtable_t;
434 struct file ;
447 struct seq_file ;
483 struct thread_struct ;
485 struct cpumask ;
20 struct qspinlock { atomic_t val; } ;
33 typedef struct qspinlock arch_spinlock_t;
34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ;
14 typedef struct qrwlock arch_rwlock_t;
247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ;
341 struct cpumask { unsigned long bits[128U]; } ;
15 typedef struct cpumask cpumask_t;
654 typedef struct cpumask *cpumask_var_t;
23 typedef atomic64_t atomic_long_t;
81 struct static_key { atomic_t enabled; } ;
22 struct tracepoint_func { void *func; void *data; int prio; } ;
28 struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ;
254 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ;
26 struct __anonstruct____missing_field_name_59 { u64 rip; u64 rdp; } ;
26 struct __anonstruct____missing_field_name_60 { u32 fip; u32 fcs; u32 foo; u32 fos; } ;
26 union __anonunion____missing_field_name_58 { struct __anonstruct____missing_field_name_59 __annonCompField13; struct __anonstruct____missing_field_name_60 __annonCompField14; } ;
26 union __anonunion____missing_field_name_61 { u32 padding1[12U]; u32 sw_reserved[12U]; } ;
26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_58 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_61 __annonCompField16; } ;
66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ;
227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ;
233 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ;
254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ;
271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; union fpregs_state state; } ;
169 struct seq_operations ;
372 struct perf_event ;
377 struct __anonstruct_mm_segment_t_73 { unsigned long seg; } ;
377 typedef struct __anonstruct_mm_segment_t_73 mm_segment_t;
378 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ;
33 struct lockdep_map ;
55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ;
28 struct lockdep_subclass_key { char __one_byte; } ;
53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ;
59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ;
144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ;
207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ;
572 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
32 typedef struct raw_spinlock raw_spinlock_t;
33 struct __anonstruct____missing_field_name_75 { u8 __padding[24U]; struct lockdep_map dep_map; } ;
33 union __anonunion____missing_field_name_74 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_75 __annonCompField19; } ;
33 struct spinlock { union __anonunion____missing_field_name_74 __annonCompField20; } ;
76 typedef struct spinlock spinlock_t;
23 struct __anonstruct_rwlock_t_76 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
23 typedef struct __anonstruct_rwlock_t_76 rwlock_t;
416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ;
52 typedef struct seqcount seqcount_t;
407 struct __anonstruct_seqlock_t_91 { struct seqcount seqcount; spinlock_t lock; } ;
407 typedef struct __anonstruct_seqlock_t_91 seqlock_t;
601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ;
7 typedef __s64 time64_t;
83 struct user_namespace ;
22 struct __anonstruct_kuid_t_92 { uid_t val; } ;
22 typedef struct __anonstruct_kuid_t_92 kuid_t;
27 struct __anonstruct_kgid_t_93 { gid_t val; } ;
27 typedef struct __anonstruct_kgid_t_93 kgid_t;
139 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ;
36 struct vm_area_struct ;
38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ;
43 typedef struct __wait_queue_head wait_queue_head_t;
97 struct __anonstruct_nodemask_t_94 { unsigned long bits[16U]; } ;
97 typedef struct __anonstruct_nodemask_t_94 nodemask_t;
247 typedef unsigned int isolate_mode_t;
13 struct optimistic_spin_queue { atomic_t tail; } ;
39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ;
67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ;
177 struct rw_semaphore ;
178 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ;
178 struct completion { unsigned int done; wait_queue_head_t wait; } ;
446 union ktime { s64 tv64; } ;
41 typedef union ktime ktime_t;
1144 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ;
254 struct hrtimer ;
255 enum hrtimer_restart ;
256 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ;
41 struct rb_root { struct rb_node *rb_node; } ;
835 struct nsproxy ;
278 struct workqueue_struct ;
279 struct work_struct ;
54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ;
107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ;
217 struct resource ;
64 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; unsigned long desc; struct resource *parent; struct resource *sibling; struct resource *child; } ;
58 struct pm_message { int event; } ;
64 typedef struct pm_message pm_message_t;
65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ;
320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ;
327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ;
335 struct wakeup_source ;
336 struct wake_irq ;
337 struct pm_domain_data ;
338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ;
556 struct dev_pm_qos ;
556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ;
616 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ;
26 struct ldt_struct ;
26 struct vdso_image ;
26 struct __anonstruct_mm_context_t_165 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; } ;
26 typedef struct __anonstruct_mm_context_t_165 mm_context_t;
1276 struct llist_node ;
64 struct llist_node { struct llist_node *next; } ;
37 struct cred ;
19 struct inode ;
58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ;
66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ;
73 struct __anonstruct____missing_field_name_211 { struct arch_uprobe_task autask; unsigned long vaddr; } ;
73 struct __anonstruct____missing_field_name_212 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ;
73 union __anonunion____missing_field_name_210 { struct __anonstruct____missing_field_name_211 __annonCompField35; struct __anonstruct____missing_field_name_212 __annonCompField36; } ;
73 struct uprobe ;
73 struct return_instance ;
73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_210 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ;
94 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ;
110 struct xol_area ;
111 struct uprobes_state { struct xol_area *xol_area; } ;
150 struct address_space ;
151 struct mem_cgroup ;
152 union __anonunion____missing_field_name_213 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ;
152 union __anonunion____missing_field_name_214 { unsigned long index; void *freelist; } ;
152 struct __anonstruct____missing_field_name_218 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ;
152 union __anonunion____missing_field_name_217 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_218 __annonCompField40; int units; } ;
152 struct __anonstruct____missing_field_name_216 { union __anonunion____missing_field_name_217 __annonCompField41; atomic_t _refcount; } ;
152 union __anonunion____missing_field_name_215 { unsigned long counters; struct __anonstruct____missing_field_name_216 __annonCompField42; } ;
152 struct dev_pagemap ;
152 struct __anonstruct____missing_field_name_220 { struct page *next; int pages; int pobjects; } ;
152 struct __anonstruct____missing_field_name_221 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ;
152 struct __anonstruct____missing_field_name_222 { unsigned long __pad; pgtable_t pmd_huge_pte; } ;
152 union __anonunion____missing_field_name_219 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_220 __annonCompField44; struct callback_head callback_head; struct __anonstruct____missing_field_name_221 __annonCompField45; struct __anonstruct____missing_field_name_222 __annonCompField46; } ;
152 struct kmem_cache ;
152 union __anonunion____missing_field_name_223 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ;
152 struct page { unsigned long flags; union __anonunion____missing_field_name_213 __annonCompField38; union __anonunion____missing_field_name_214 __annonCompField39; union __anonunion____missing_field_name_215 __annonCompField43; union __anonunion____missing_field_name_219 __annonCompField47; union __anonunion____missing_field_name_223 __annonCompField48; struct mem_cgroup *mem_cgroup; } ;
197 struct page_frag { struct page *page; __u32 offset; __u32 size; } ;
282 struct userfaultfd_ctx ;
282 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ;
289 struct __anonstruct_shared_224 { struct rb_node rb; unsigned long rb_subtree_last; } ;
289 struct anon_vma ;
289 struct vm_operations_struct ;
289 struct mempolicy ;
289 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_224 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;
362 struct core_thread { struct task_struct *task; struct core_thread *next; } ;
367 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ;
381 struct task_rss_stat { int events; int count[4U]; } ;
389 struct mm_rss_stat { atomic_long_t count[4U]; } ;
394 struct kioctx_table ;
395 struct linux_binfmt ;
395 struct mmu_notifier_mm ;
395 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ;
619 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ;
15 typedef __u64 Elf64_Addr;
16 typedef __u16 Elf64_Half;
18 typedef __u64 Elf64_Off;
20 typedef __u32 Elf64_Word;
21 typedef __u64 Elf64_Xword;
190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ;
198 typedef struct elf64_sym Elf64_Sym;
219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ;
235 typedef struct elf64_hdr Elf64_Ehdr;
314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ;
326 typedef struct elf64_shdr Elf64_Shdr;
53 union __anonunion____missing_field_name_229 { unsigned long bitmap[4U]; struct callback_head callback_head; } ;
53 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_229 __annonCompField49; } ;
41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ;
124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ;
167 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ;
199 struct dentry ;
200 struct iattr ;
201 struct super_block ;
202 struct file_system_type ;
203 struct kernfs_open_node ;
204 struct kernfs_iattrs ;
227 struct kernfs_root ;
227 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ;
85 struct kernfs_node ;
85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ;
89 struct kernfs_ops ;
89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ;
96 union __anonunion____missing_field_name_234 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ;
96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_234 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ;
138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ;
157 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ;
173 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ;
191 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ;
499 struct sock ;
500 struct kobject ;
501 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ;
507 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ;
59 struct bin_attribute ;
60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ;
37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ;
92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;
165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;
530 struct kref { atomic_t refcount; } ;
52 struct kset ;
52 struct kobj_type ;
52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ;
115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ;
123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ;
131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;
148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ;
223 struct kernel_param ;
228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ;
62 struct kparam_string ;
62 struct kparam_array ;
62 union __anonunion____missing_field_name_237 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ;
62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_237 __annonCompField51; } ;
83 struct kparam_string { unsigned int maxlen; char *string; } ;
89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ;
470 struct exception_table_entry ;
24 struct latch_tree_node { struct rb_node node[2U]; } ;
211 struct mod_arch_specific { } ;
39 struct module_param_attrs ;
39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ;
50 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ;
277 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ;
284 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ;
291 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ;
307 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ;
321 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ;
329 struct module_sect_attrs ;
329 struct module_notes_attrs ;
329 struct trace_event_call ;
329 struct trace_enum_map ;
329 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ;
799 struct clk ;
512 struct device_node ;
22 struct kernel_cap_struct { __u32 cap[2U]; } ;
25 typedef struct kernel_cap_struct kernel_cap_t;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
4 typedef unsigned long cputime_t;
25 struct sem_undo_list ;
25 struct sysv_sem { struct sem_undo_list *undo_list; } ;
78 struct user_struct ;
26 struct sysv_shm { struct list_head shm_clist; } ;
24 struct __anonstruct_sigset_t_245 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_245 sigset_t;
25 struct siginfo ;
17 typedef void __signalfn_t(int);
18 typedef __signalfn_t *__sighandler_t;
20 typedef void __restorefn_t();
21 typedef __restorefn_t *__sigrestore_t;
34 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_247 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_248 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_249 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_250 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__addr_bnd_253 { void *_lower; void *_upper; } ;
11 union __anonunion____missing_field_name_252 { struct __anonstruct__addr_bnd_253 _addr_bnd; __u32 _pkey; } ;
11 struct __anonstruct__sigfault_251 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_252 __annonCompField52; } ;
11 struct __anonstruct__sigpoll_254 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_255 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_246 { int _pad[28U]; struct __anonstruct__kill_247 _kill; struct __anonstruct__timer_248 _timer; struct __anonstruct__rt_249 _rt; struct __anonstruct__sigchld_250 _sigchld; struct __anonstruct__sigfault_251 _sigfault; struct __anonstruct__sigpoll_254 _sigpoll; struct __anonstruct__sigsys_255 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_246 _sifields; } ;
118 typedef struct siginfo siginfo_t;
22 struct sigpending { struct list_head list; sigset_t signal; } ;
257 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ;
271 struct k_sigaction { struct sigaction sa; } ;
457 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ;
464 struct pid_namespace ;
464 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ;
56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ;
68 struct pid_link { struct hlist_node node; struct pid *pid; } ;
43 struct seccomp_filter ;
44 struct seccomp { int mode; struct seccomp_filter *filter; } ;
20 struct rt_mutex { raw_spinlock_t wait_lock; struct rb_root waiters; struct rb_node *waiters_leftmost; struct task_struct *owner; int save_state; const char *name; const char *file; int line; void *magic; } ;
40 struct rt_mutex_waiter ;
41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ;
11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ;
12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ;
50 struct hrtimer_clock_base ;
51 struct hrtimer_cpu_base ;
60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ;
65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; } ;
125 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ;
158 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ;
12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ;
17 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
41 struct assoc_array_ptr ;
41 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct signal_struct ;
37 struct key_type ;
41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
91 union key_payload { void *rcu_data0; void *data[4U]; } ;
128 union __anonunion____missing_field_name_290 { struct list_head graveyard_link; struct rb_node serial_node; } ;
128 struct key_user ;
128 union __anonunion____missing_field_name_291 { time_t expiry; time_t revoked_at; } ;
128 struct __anonstruct____missing_field_name_293 { struct key_type *type; char *description; } ;
128 union __anonunion____missing_field_name_292 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_293 __annonCompField55; } ;
128 struct __anonstruct____missing_field_name_295 { struct list_head name_link; struct assoc_array keys; } ;
128 union __anonunion____missing_field_name_294 { union key_payload payload; struct __anonstruct____missing_field_name_295 __annonCompField57; int reject_error; } ;
128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_290 __annonCompField53; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_291 __annonCompField54; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_292 __annonCompField56; union __anonunion____missing_field_name_294 __annonCompField58; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ;
377 struct audit_context ;
27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ;
90 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
377 struct percpu_ref ;
55 typedef void percpu_ref_func_t(struct percpu_ref *);
68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ;
325 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ;
331 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ;
65 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *fast_read_ctr; struct rw_semaphore rw_sem; atomic_t slow_read_ctr; wait_queue_head_t write_waitq; } ;
54 struct cgroup ;
55 struct cgroup_root ;
56 struct cgroup_subsys ;
57 struct cgroup_taskset ;
101 struct cgroup_file { struct kernfs_node *kn; } ;
90 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ;
141 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; } ;
221 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; int ancestor_ids[]; } ;
306 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ;
345 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ;
430 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ;
128 struct futex_pi_state ;
129 struct robust_list_head ;
130 struct bio_list ;
131 struct fs_struct ;
132 struct perf_event_context ;
133 struct blk_plug ;
135 struct nameidata ;
188 struct cfs_rq ;
189 struct task_group ;
493 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ;
536 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ;
544 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ;
551 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ;
576 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ;
592 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ;
614 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ;
659 struct autogroup ;
660 struct tty_struct ;
660 struct taskstats ;
660 struct tty_audit_buf ;
660 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; atomic_t oom_victims; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ;
835 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
880 struct backing_dev_info ;
881 struct reclaim_state ;
882 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
896 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ;
953 struct wake_q_node { struct wake_q_node *next; } ;
1185 struct io_context ;
1219 struct pipe_inode_info ;
1221 struct load_weight { unsigned long weight; u32 inv_weight; } ;
1228 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ;
1286 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
1321 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
1358 struct rt_rq ;
1358 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
1376 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ;
1440 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ;
1459 struct sched_class ;
1459 struct files_struct ;
1459 struct compat_robust_list_head ;
1459 struct numa_group ;
1459 struct kcov ;
1459 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; struct thread_struct thread; } ;
13 typedef unsigned long kernel_ulong_t;
186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ;
229 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ;
484 struct platform_device_id { char name[20U]; kernel_ulong_t driver_data; } ;
674 struct klist_node ;
37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ;
93 struct hlist_bl_node ;
93 struct hlist_bl_head { struct hlist_bl_node *first; } ;
36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ;
114 struct __anonstruct____missing_field_name_340 { spinlock_t lock; int count; } ;
114 union __anonunion____missing_field_name_339 { struct __anonstruct____missing_field_name_340 __annonCompField64; } ;
114 struct lockref { union __anonunion____missing_field_name_339 __annonCompField65; } ;
77 struct path ;
78 struct vfsmount ;
79 struct __anonstruct____missing_field_name_342 { u32 hash; u32 len; } ;
79 union __anonunion____missing_field_name_341 { struct __anonstruct____missing_field_name_342 __annonCompField66; u64 hash_len; } ;
79 struct qstr { union __anonunion____missing_field_name_341 __annonCompField67; const unsigned char *name; } ;
65 struct dentry_operations ;
65 union __anonunion____missing_field_name_343 { struct list_head d_lru; wait_queue_head_t *d_wait; } ;
65 union __anonunion_d_u_344 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ;
65 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_343 __annonCompField68; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_344 d_u; } ;
121 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ;
591 struct path { struct vfsmount *mnt; struct dentry *dentry; } ;
19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ;
27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ;
80 struct list_lru_one { struct list_head list; long nr_items; } ;
32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ;
37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ;
47 struct list_lru { struct list_lru_node *node; struct list_head list; } ;
63 struct __anonstruct____missing_field_name_346 { struct radix_tree_node *parent; void *private_data; } ;
63 union __anonunion____missing_field_name_345 { struct __anonstruct____missing_field_name_346 __annonCompField69; struct callback_head callback_head; } ;
63 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned int count; union __anonunion____missing_field_name_345 __annonCompField70; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ;
106 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ;
45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ;
38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ;
87 struct block_device ;
266 struct delayed_call { void (*fn)(void *); void *arg; } ;
261 struct bdi_writeback ;
262 struct export_operations ;
265 struct kiocb ;
266 struct poll_table_struct ;
267 struct kstatfs ;
268 struct swap_info_struct ;
269 struct iov_iter ;
270 struct fscrypt_info ;
271 struct fscrypt_operations ;
76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ;
213 struct dquot ;
214 struct kqid ;
19 typedef __kernel_uid32_t projid_t;
23 struct __anonstruct_kprojid_t_354 { projid_t val; } ;
23 typedef struct __anonstruct_kprojid_t_354 kprojid_t;
181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ;
66 typedef long long qsize_t;
67 union __anonunion____missing_field_name_355 { kuid_t uid; kgid_t gid; kprojid_t projid; } ;
67 struct kqid { union __anonunion____missing_field_name_355 __annonCompField72; enum quota_type type; } ;
194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ;
216 struct quota_format_type ;
217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ;
282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ;
309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ;
361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ;
407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ;
418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ;
431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ;
447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ;
511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ;
541 struct writeback_control ;
542 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ;
367 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ;
426 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; struct list_head private_list; void *private_data; } ;
447 struct request_queue ;
448 struct hd_struct ;
448 struct gendisk ;
448 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ;
563 struct posix_acl ;
589 struct inode_operations ;
589 union __anonunion____missing_field_name_360 { const unsigned int i_nlink; unsigned int __i_nlink; } ;
589 union __anonunion____missing_field_name_361 { struct hlist_head i_dentry; struct callback_head i_rcu; } ;
589 struct file_lock_context ;
589 struct cdev ;
589 union __anonunion____missing_field_name_362 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ;
589 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_360 __annonCompField73; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_361 __annonCompField74; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_362 __annonCompField75; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ;
843 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ;
851 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ;
874 union __anonunion_f_u_363 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ;
874 struct file { union __anonunion_f_u_363 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ;
959 typedef void *fl_owner_t;
960 struct file_lock ;
961 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ;
967 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ;
994 struct nlm_lockowner ;
995 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ;
14 struct nfs4_lock_state ;
15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ;
19 struct fasync_struct ;
19 struct __anonstruct_afs_365 { struct list_head link; int state; } ;
19 union __anonunion_fl_u_364 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_365 afs; } ;
19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_364 fl_u; } ;
1047 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ;
1255 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ;
1290 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ;
1320 struct super_operations ;
1320 struct xattr_handler ;
1320 struct mtd_info ;
1320 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ;
1603 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ;
1616 struct dir_context ;
1641 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ;
1648 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ;
1717 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, struct inode *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, struct inode *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ;
1774 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;
2018 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
3193 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ;
30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
222 struct pinctrl ;
223 struct pinctrl_state ;
194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
76 struct dma_map_ops ;
76 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ;
21 struct pdev_archdata { } ;
24 struct device_private ;
25 struct device_driver ;
26 struct driver_private ;
27 struct class ;
28 struct subsys_private ;
29 struct bus_type ;
30 struct fwnode_handle ;
31 struct iommu_ops ;
32 struct iommu_group ;
61 struct device_attribute ;
61 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
142 struct device_type ;
201 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ;
207 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
357 struct class_attribute ;
357 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
450 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
518 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
546 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
699 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
708 struct irq_domain ;
708 struct dma_coherent_mem ;
708 struct cma ;
708 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ;
862 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
1327 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_ACPI_DATA = 3, FWNODE_PDATA = 4, FWNODE_IRQCHIP = 5 } ;
1336 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ;
32 typedef u32 phandle;
34 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ;
44 struct device_node { const char *name; const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; } ;
1155 struct i2c_msg { __u16 addr; __u16 flags; __u16 len; __u8 *buf; } ;
83 union i2c_smbus_data { __u8 byte; __u16 word; __u8 block[34U]; } ;
39 struct i2c_algorithm ;
40 struct i2c_adapter ;
41 struct i2c_client ;
44 enum i2c_slave_event ;
207 struct i2c_client { unsigned short flags; unsigned short addr; char name[20U]; struct i2c_adapter *adapter; struct device dev; int irq; struct list_head detected; int (*slave_cb)(struct i2c_client *, enum i2c_slave_event , u8 *); } ;
262 enum i2c_slave_event { I2C_SLAVE_READ_REQUESTED = 0, I2C_SLAVE_WRITE_REQUESTED = 1, I2C_SLAVE_READ_PROCESSED = 2, I2C_SLAVE_WRITE_RECEIVED = 3, I2C_SLAVE_STOP = 4 } ;
375 struct i2c_algorithm { int (*master_xfer)(struct i2c_adapter *, struct i2c_msg *, int); int (*smbus_xfer)(struct i2c_adapter *, u16 , unsigned short, char, u8 , int, union i2c_smbus_data *); u32 (*functionality)(struct i2c_adapter *); int (*reg_slave)(struct i2c_client *); int (*unreg_slave)(struct i2c_client *); } ;
444 struct i2c_bus_recovery_info { int (*recover_bus)(struct i2c_adapter *); int (*get_scl)(struct i2c_adapter *); void (*set_scl)(struct i2c_adapter *, int); int (*get_sda)(struct i2c_adapter *); void (*prepare_recovery)(struct i2c_adapter *); void (*unprepare_recovery)(struct i2c_adapter *); int scl_gpio; int sda_gpio; } ;
483 struct i2c_adapter_quirks { u64 flags; int max_num_msgs; u16 max_write_len; u16 max_read_len; u16 max_comb_1st_msg_len; u16 max_comb_2nd_msg_len; } ;
513 struct i2c_adapter { struct module *owner; unsigned int class; const struct i2c_algorithm *algo; void *algo_data; struct rt_mutex bus_lock; struct rt_mutex mux_lock; int timeout; int retries; struct device dev; int nr; char name[48U]; struct completion dev_released; struct mutex userspace_clients_lock; struct list_head userspace_clients; struct i2c_bus_recovery_info *bus_recovery_info; const struct i2c_adapter_quirks *quirks; void (*lock_bus)(struct i2c_adapter *, unsigned int); int (*trylock_bus)(struct i2c_adapter *, unsigned int); void (*unlock_bus)(struct i2c_adapter *, unsigned int); } ;
750 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ;
16 typedef enum irqreturn irqreturn_t;
63 struct exception_table_entry { int insn; int fixup; int handler; } ;
708 struct mfd_cell ;
709 struct platform_device { const char *name; int id; bool id_auto; struct device dev; u32 num_resources; struct resource *resource; const struct platform_device_id *id_entry; char *driver_override; struct mfd_cell *mfd_cell; struct pdev_archdata archdata; } ;
352 struct axxia_i2c_dev { void *base; struct i2c_msg *msg; size_t msg_xfrd; int msg_err; struct completion msg_complete; struct device *dev; struct i2c_adapter adapter; struct clk *i2c_clk; u32 bus_clk_rate; } ;
1 long int __builtin_expect(long, long);
33 extern struct module __this_module;
63 void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...);
3 bool ldv_is_err(const void *ptr);
6 long int ldv_ptr_err(const void *ptr);
27 size_t strlcpy(char *, const char *, size_t );
18 u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder);
97 u64 div_u64(u64 dividend, u32 divisor);
32 long int PTR_ERR(const void *ptr);
41 bool IS_ERR(const void *ptr);
72 void __init_waitqueue_head(wait_queue_head_t *, const char *, struct lock_class_key *);
73 void init_completion(struct completion *x);
86 void reinit_completion(struct completion *x);
95 unsigned long int wait_for_completion_timeout(struct completion *, unsigned long);
106 void complete(struct completion *);
78 extern volatile unsigned long jiffies;
292 unsigned long int __msecs_to_jiffies(const unsigned int);
354 unsigned long int msecs_to_jiffies(const unsigned int m);
58 unsigned int readl(const volatile void *addr);
66 void writel(unsigned int val, volatile void *addr);
11 void ldv_clk_disable_clk(struct clk *clk);
12 int ldv_clk_enable_clk();
13 void ldv_clk_disable_i2c_clk_of_axxia_i2c_dev(struct clk *clk);
14 int ldv_clk_enable_i2c_clk_of_axxia_i2c_dev();
249 struct clk * devm_clk_get(struct device *, const char *);
264 int ldv_clk_enable_5(struct clk *clk);
280 void ldv_clk_disable_6(struct clk *clk);
288 unsigned long int clk_get_rate(struct clk *);
501 int ldv_clk_prepare_enable_7(struct clk *clk);
511 void ldv_clk_disable_unprepare_8(struct clk *clk);
651 void * devm_kmalloc(struct device *, size_t , gfp_t );
657 void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp);
682 void * devm_ioremap_resource(struct device *, struct resource *);
912 void * dev_get_drvdata(const struct device *dev);
917 void dev_set_drvdata(struct device *dev, void *data);
1135 void dev_err(const struct device *, const char *, ...);
1137 void dev_warn(const struct device *, const char *, ...);
298 int of_property_read_u32_array(const struct device_node *, const char *, u32 *, size_t );
918 int of_property_read_u32(const struct device_node *np, const char *propname, u32 *out_value);
478 int i2c_recover_bus(struct i2c_adapter *);
482 int i2c_generic_scl_recovery(struct i2c_adapter *);
562 void * i2c_get_adapdata(const struct i2c_adapter *dev);
567 void i2c_set_adapdata(struct i2c_adapter *dev, void *data);
655 int i2c_add_adapter(struct i2c_adapter *);
656 void i2c_del_adapter(struct i2c_adapter *);
164 int devm_request_threaded_irq(struct device *, unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *);
170 int devm_request_irq(struct device *dev, unsigned int irq, irqreturn_t (*handler)(int, void *), unsigned long irqflags, const char *devname, void *dev_id);
52 struct resource * platform_get_resource(struct platform_device *, unsigned int, unsigned int);
54 int platform_get_irq(struct platform_device *, unsigned int);
211 void * platform_get_drvdata(const struct platform_device *pdev);
216 void platform_set_drvdata(struct platform_device *pdev, void *data);
112 void i2c_int_disable(struct axxia_i2c_dev *idev, u32 mask);
120 void i2c_int_enable(struct axxia_i2c_dev *idev, u32 mask);
131 u32 ns_to_clk(u64 ns, u32 clk_mhz);
136 int axxia_i2c_init(struct axxia_i2c_dev *idev);
211 int i2c_m_rd(const struct i2c_msg *msg);
216 int i2c_m_ten(const struct i2c_msg *msg);
221 int i2c_m_recv_len(const struct i2c_msg *msg);
230 int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev);
262 int axxia_i2c_fill_tx_fifo(struct axxia_i2c_dev *idev);
275 irqreturn_t axxia_i2c_isr(int irq, void *_dev);
337 int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg);
411 int axxia_i2c_stop(struct axxia_i2c_dev *idev);
434 int axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
448 int axxia_i2c_get_scl(struct i2c_adapter *adap);
455 void axxia_i2c_set_scl(struct i2c_adapter *adap, int val);
467 int axxia_i2c_get_sda(struct i2c_adapter *adap);
474 struct i2c_bus_recovery_info axxia_i2c_recovery_info = { &i2c_generic_scl_recovery, &axxia_i2c_get_scl, &axxia_i2c_set_scl, &axxia_i2c_get_sda, 0, 0, 0, 0 };
481 u32 axxia_i2c_func(struct i2c_adapter *adap);
488 const struct i2c_algorithm axxia_i2c_algo = { &axxia_i2c_xfer, 0, &axxia_i2c_func, 0, 0 };
493 struct i2c_adapter_quirks axxia_i2c_quirks = { 0ULL, 0, 255U, 255U, (unsigned short)0, (unsigned short)0 };
498 int axxia_i2c_probe(struct platform_device *pdev);
571 int axxia_i2c_remove(struct platform_device *pdev);
587 const struct of_device_id __mod_of__axxia_i2c_of_match_device_table[2U] = { };
620 void ldv_check_final_state();
623 void ldv_check_return_value(int);
626 void ldv_check_return_value_probe(int);
629 void ldv_initialize();
632 void ldv_handler_precall();
635 int nondet_int();
638 int LDV_IN_INTERRUPT = 0;
641 void ldv_main0_sequence_infinite_withcheck_stateful();
10 void ldv_error();
25 int ldv_undef_int();
14 void * ldv_err_ptr(long error);
28 bool ldv_is_err_or_null(const void *ptr);
9 int ldv_counter_clk = 0;
32 int ldv_counter_i2c_clk_of_axxia_i2c_dev = 0;
return ;
}
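The counters ldv_counter_clk and ldv_counter_i2c_clk_of_axxia_i2c_dev declared above drive the clock model that the entry-point trace below exercises. A minimal sketch of that model, reconstructed from the calls visible in the trace, is given here; only the i2c_clk counter is shown (the plain clk counter follows the same pattern), and the bodies are illustrative, not the exact model source shipped with the verifier.

/* Sketch of the LDV clock model assumed by this trace: a successful enable
 * sets the per-clock counter, a disable clears it, and the final-state check
 * reports an error if any clock is still enabled when the module goes away. */
struct clk;
extern int ldv_undef_int(void);   /* nondeterministic value supplied by the verifier */
extern void ldv_error(void);      /* reached only when the checked property is violated */

int ldv_counter_i2c_clk_of_axxia_i2c_dev = 0;

int ldv_clk_enable_i2c_clk_of_axxia_i2c_dev(void)
{
        int retval = ldv_undef_int();
        if (retval == 0)
                ldv_counter_i2c_clk_of_axxia_i2c_dev = 1;   /* clock now enabled */
        return retval;
}

void ldv_clk_disable_i2c_clk_of_axxia_i2c_dev(struct clk *clk)
{
        ldv_counter_i2c_clk_of_axxia_i2c_dev = 0;           /* clock released */
}

void ldv_check_final_state(void)
{
        if (ldv_counter_i2c_clk_of_axxia_i2c_dev != 0)
                ldv_error();   /* an enabled clock was never disabled */
}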
-entry_point
{
643 struct i2c_adapter *var_group1;
644 int var_axxia_i2c_set_scl_13_p1;
645 struct platform_device *var_group2;
646 int res_axxia_i2c_probe_16;
647 int var_axxia_i2c_isr_9_p0;
648 void *var_axxia_i2c_isr_9_p1;
649 int ldv_s_axxia_i2c_driver_platform_driver;
650 int tmp;
651 int tmp___0;
1135 ldv_s_axxia_i2c_driver_platform_driver = 0;
1121 LDV_IN_INTERRUPT = 1;
1130 ldv_initialize() { /* Function call is skipped due to function is undefined */}
1140 goto ldv_30513;
1140 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
1140 assume(tmp___0 != 0);
1143 goto ldv_30512;
1141 ldv_30512:;
1144 tmp = nondet_int() { /* Function call is skipped due to function is undefined */}
1144 switch (tmp);
1145 assume(!(tmp == 0));
1225 assume(!(tmp == 1));
1304 assume(!(tmp == 2));
1383 assume(!(tmp == 3));
1462 assume(tmp == 4);
1465 assume(ldv_s_axxia_i2c_driver_platform_driver == 0);
1532 -axxia_i2c_probe(var_group2)
{
500 struct device_node *np;
501 struct axxia_i2c_dev *idev;
502 struct resource *res;
503 void *base;
504 int irq;
505 int ret;
506 void *tmp;
507 long tmp___0;
508 _Bool tmp___1;
509 long tmp___2;
510 _Bool tmp___3;
500 np = pdev->dev.of_node;
501 idev = (struct axxia_i2c_dev *)0;
505 ret = 0;
507 -devm_kzalloc(&(pdev->dev), 2256UL, 37748928U)
{
659 void *tmp;
659 tmp = devm_kmalloc(dev, size, gfp | 32768U) { /* Function call is skipped due to function is undefined */}
659 return tmp;;
}
507 idev = (struct axxia_i2c_dev *)tmp;
508 assume(!(((unsigned long)idev) == ((unsigned long)((struct axxia_i2c_dev *)0))));
511 res = platform_get_resource(pdev, 512U, 0U) { /* Function call is skipped due to function is undefined */}
512 base = devm_ioremap_resource(&(pdev->dev), res) { /* Function call is skipped due to function is undefined */}
513 -IS_ERR((const void *)base)
{
30 _Bool tmp;
31 -ldv_is_err(ptr)
{
10 return ((unsigned long)ptr) > 2012UL;;
}
31 return tmp;;
}
513 assume(((int)tmp___1) == 0);
516 irq = platform_get_irq(pdev, 0U) { /* Function call is skipped due to function is undefined */}
517 assume(!(irq < 0));
522 idev->i2c_clk = devm_clk_get(&(pdev->dev), "i2c") { /* Function call is skipped due to function is undefined */}
523 const void *__CPAchecker_TMP_0 = (const void *)(idev->i2c_clk);
523 -IS_ERR(__CPAchecker_TMP_0)
{
30 _Bool tmp;
31 -ldv_is_err(ptr)
{
10 return ((unsigned long)ptr) > 2012UL;;
}
31 return tmp;;
}
523 assume(((int)tmp___3) == 0);
528 idev->base = base;
529 idev->dev = &(pdev->dev);
530 -init_completion(&(idev->msg_complete))
{
75 struct lock_class_key __key;
75 x->done = 0U;
76 __init_waitqueue_head(&(x->wait), "&x->wait", &__key) { /* Function call is skipped due to function is undefined */}
78 return ;;
}
532 -of_property_read_u32((const struct device_node *)np, "clock-frequency", &(idev->bus_clk_rate))
{
921 int tmp;
922 tmp = of_property_read_u32_array(np, propname, out_value, 1UL) { /* Function call is skipped due to function is undefined */}
922 return tmp;;
}
533 assume(!((idev->bus_clk_rate) == 0U));
536 -axxia_i2c_init(idev)
{
138 unsigned int divisor;
139 unsigned long tmp;
140 unsigned int clk_mhz;
141 unsigned long tmp___0;
142 unsigned int t_setup;
143 unsigned int t_high;
144 unsigned int t_low;
145 unsigned int tmo_clk;
146 unsigned int prescale;
147 unsigned long timeout;
148 struct _ddebug descriptor;
149 long tmp___1;
150 unsigned long tmp___2;
151 unsigned int tmp___3;
152 unsigned int tmp___4;
153 unsigned int tmp___5;
138 tmp = clk_get_rate(idev->i2c_clk) { /* Function call is skipped due to function is undefined */}
138 unsigned long __CPAchecker_TMP_0 = (unsigned long)(idev->bus_clk_rate);
138 divisor = (u32 )(tmp / __CPAchecker_TMP_0);
139 tmp___0 = clk_get_rate(idev->i2c_clk) { /* Function call is skipped due to function is undefined */}
139 clk_mhz = (u32 )(tmp___0 / 1000000UL);
146 descriptor.modname = "i2c_axxia";
146 descriptor.function = "axxia_i2c_init";
146 descriptor.filename = "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--320_7a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/11495/dscv_tempdir/dscv/ri/320_7a/drivers/i2c/busses/i2c-axxia.c";
146 descriptor.format = "rate=%uHz per_clk=%uMHz -> ratio=1:%u\n";
146 descriptor.lineno = 147U;
146 descriptor.flags = 1U;
146 tmp___1 = __builtin_expect(((long)(descriptor.flags)) & 1L, 1L) { /* Function call is skipped due to function is undefined */}
146 assume(!(tmp___1 != 0L));
150 volatile void *__CPAchecker_TMP_2 = (volatile void *)(idev->base);
150 -writel(1U, __CPAchecker_TMP_2 + 36U)
{
66 Ignored inline assembler code
67 return ;;
}
151 -msecs_to_jiffies(100U)
{
356 unsigned long tmp___0;
361 tmp___0 = __msecs_to_jiffies(m) { /* Function call is skipped due to function is undefined */}
361 return tmp___0;;
}
151 timeout = tmp___2 + ((unsigned long)jiffies);
152 goto ldv_30349;
152 const volatile void *__CPAchecker_TMP_4 = (const volatile void *)(idev->base);
152 -readl(__CPAchecker_TMP_4 + 36U)
{
60 unsigned int ret;
58 Ignored inline assembler code
58 return ret;;
}
152 assume(!((((int)tmp___3) & 1) == 0));
154 goto ldv_30348;
153 ldv_30348:;
153 assume(((long)(timeout - ((unsigned long)jiffies))) < 0L);
154 const struct device *__CPAchecker_TMP_3 = (const struct device *)(idev->dev);
154 dev_warn(__CPAchecker_TMP_3, "Soft reset failed\n") { /* Function call is skipped due to function is undefined */}
155 goto ldv_30347;
160 volatile void *__CPAchecker_TMP_5 = (volatile void *)(idev->base);
160 -writel(1U, __CPAchecker_TMP_5)
{
66 Ignored inline assembler code
67 return ;;
}
162 assume(!((idev->bus_clk_rate) <= 100000U));
169 t_high = divisor / 3U;
170 t_low = (divisor * 2U) / 3U;
171 -ns_to_clk(100ULL, clk_mhz)
{
133 unsigned long long tmp;
133 -div_u64(((u64 )clk_mhz) * ns, 1000U)
{
99 unsigned int remainder;
100 unsigned long long tmp;
100 -div_u64_rem(dividend, divisor, &remainder)
{
20 *remainder = (u32 )(dividend % ((u64 )divisor));
21 return dividend / ((u64 )divisor);;
}
100 return tmp;;
}
133 return (u32 )tmp;;
}
175 volatile void *__CPAchecker_TMP_6 = (volatile void *)(idev->base);
175 -writel(t_high, __CPAchecker_TMP_6 + 128U)
{
66 Ignored inline assembler code
67 return ;;
}
177 volatile void *__CPAchecker_TMP_7 = (volatile void *)(idev->base);
177 -writel(t_low, __CPAchecker_TMP_7 + 132U)
{
66 Ignored inline assembler code
67 return ;;
}
179 volatile void *__CPAchecker_TMP_8 = (volatile void *)(idev->base);
179 -writel(t_setup, __CPAchecker_TMP_8 + 140U)
{
66 Ignored inline assembler code
67 return ;;
}
181 -ns_to_clk(300ULL, clk_mhz)
{
133 unsigned long long tmp;
133 -div_u64(((u64 )clk_mhz) * ns, 1000U)
{
99 unsigned int remainder;
100 unsigned long long tmp;
100 -div_u64_rem(dividend, divisor, &remainder)
{
20 *remainder = (u32 )(dividend % ((u64 )divisor));
21 return dividend / ((u64 )divisor);;
}
100 return tmp;;
}
133 return (u32 )tmp;;
}
181 volatile void *__CPAchecker_TMP_9 = (volatile void *)(idev->base);
181 -writel(tmp___4, __CPAchecker_TMP_9 + 144U)
{
66 Ignored inline assembler code
67 return ;;
}
183 -ns_to_clk(50ULL, clk_mhz)
{
133 unsigned long long tmp;
133 -div_u64(((u64 )clk_mhz) * ns, 1000U)
{
99 unsigned int remainder;
100 unsigned long long tmp;
100 -div_u64_rem(dividend, divisor, &remainder)
{
20 *remainder = (u32 )(dividend % ((u64 )divisor));
21 return dividend / ((u64 )divisor);;
}
100 return tmp;;
}
133 return (u32 )tmp;;
}
183 volatile void *__CPAchecker_TMP_10 = (volatile void *)(idev->base);
183 -writel(tmp___5, __CPAchecker_TMP_10 + 136U)
{
66 Ignored inline assembler code
67 return ;;
}
186 -ns_to_clk(25000000ULL, clk_mhz)
{
133 unsigned long long tmp;
133 -div_u64(((u64 )clk_mhz) * ns, 1000U)
{
99 unsigned int remainder;
100 unsigned long long tmp;
100 -div_u64_rem(dividend, divisor, &remainder)
{
20 *remainder = (u32 )(dividend % ((u64 )divisor));
21 return dividend / ((u64 )divisor);;
}
100 return tmp;;
}
133 return (u32 )tmp;;
}
189 prescale = 0U;
189 goto ldv_30352;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(prescale <= 14U);
191 goto ldv_30351;
190 ldv_30351:;
190 assume(!(tmo_clk <= 32767U));
192 tmo_clk = tmo_clk >> 1;
189 prescale = prescale + 1U;
190 ldv_30352:;
189 assume(!(prescale <= 14U));
195 ldv_30350:;
194 assume(!(tmo_clk > 32767U));
198 volatile void *__CPAchecker_TMP_11 = (volatile void *)(idev->base);
198 -writel(prescale, __CPAchecker_TMP_11 + 28U)
{
66 Ignored inline assembler code
67 return ;;
}
200 volatile void *__CPAchecker_TMP_12 = (volatile void *)(idev->base);
200 -writel((tmo_clk & 65535U) | 32768U, __CPAchecker_TMP_12 + 12U)
{
66 Ignored inline assembler code
67 return ;;
}
203 -i2c_int_disable(idev, 4294967295U)
{
114 unsigned int int_en;
116 const volatile void *__CPAchecker_TMP_0 = (const volatile void *)(idev->base);
116 -readl(__CPAchecker_TMP_0 + 72U)
{
60 unsigned int ret;
58 Ignored inline assembler code
58 return ret;;
}
117 volatile void *__CPAchecker_TMP_1 = (volatile void *)(idev->base);
117 -writel((~mask) & int_en, __CPAchecker_TMP_1 + 72U)
{
66 Ignored inline assembler code
67 return ;;
}
118 return ;;
}
206 volatile void *__CPAchecker_TMP_13 = (volatile void *)(idev->base);
206 -writel(1U, __CPAchecker_TMP_13 + 8U)
{
66 Ignored inline assembler code
67 return ;;
}
208 return 0;;
}
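The repeated ldv_30351/ldv_30352 block above is the verifier's unrolling of the driver's prescale loop, and the nested ns_to_clk calls all reduce to one small division. A compact restatement of what this part of axxia_i2c_init computes is sketched below; it is reconstructed from the trace, axxia_timing_sketch is a hypothetical name rather than driver code, and struct axxia_i2c_dev is the one declared earlier in this report.

#include <linux/clk.h>
#include <linux/math64.h>

/* Peripheral-clock ticks needed to cover ns nanoseconds at clk_mhz MHz,
 * exactly as the traced ns_to_clk calls compute it. */
static u32 ns_to_clk(u64 ns, u32 clk_mhz)
{
        return div_u64((u64)clk_mhz * ns, 1000);
}

static void axxia_timing_sketch(struct axxia_i2c_dev *idev)
{
        u32 clk_mhz = clk_get_rate(idev->i2c_clk) / 1000000;
        u32 divisor = clk_get_rate(idev->i2c_clk) / idev->bus_clk_rate;
        u32 t_high  = divisor / 3;          /* fast-mode branch taken in the trace */
        u32 t_low   = divisor * 2 / 3;
        u32 tmo_clk = ns_to_clk(25000000ULL, clk_mhz);  /* 25 ms timeout in clock ticks */
        u32 prescale;

        /* The unrolled loop above: halve tmo_clk, at most 15 times, until it
         * fits in 15 bits. */
        for (prescale = 0; prescale <= 14; prescale++) {
                if (tmo_clk <= 0x7fff)
                        break;
                tmo_clk >>= 1;
        }

        /* The traced code then writes t_high, t_low, prescale and
         * (tmo_clk & 0xffff) | 0x8000 into the controller registers. */
}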
537 assume(!(ret != 0));
542 -devm_request_irq(&(pdev->dev), (unsigned int)irq, &axxia_i2c_isr, 0UL, pdev->name, (void *)idev)
{
175 int tmp;
173 tmp = devm_request_threaded_irq(dev, irq, handler, (irqreturn_t (*)(int, void *))0, irqflags, devname, dev_id) { /* Function call is skipped due to function is undefined */}
173 return tmp;;
}
544 assume(!(ret != 0));
549 -ldv_clk_prepare_enable_7(idev->i2c_clk)
{
54 int tmp;
55 -ldv_clk_enable_i2c_clk_of_axxia_i2c_dev()
{
44 int retval;
45 int tmp;
44 tmp = ldv_undef_int() { /* Function call is skipped due to function is undefined */}
44 retval = tmp;
45 assume(retval == 0);
48 ldv_counter_i2c_clk_of_axxia_i2c_dev = 1;
50 return retval;;
}
55 return tmp;;
}
551 -i2c_set_adapdata(&(idev->adapter), (void *)idev)
{
569 -dev_set_drvdata(&(dev->dev), data)
{
919 dev->driver_data = data;
920 return ;;
}
570 return ;;
}
552 strlcpy((char *)(&(idev->adapter.name)), pdev->name, 48UL) { /* Function call is skipped due to function is undefined */}
553 idev->adapter.owner = &__this_module;
554 idev->adapter.algo = &axxia_i2c_algo;
555 idev->adapter.bus_recovery_info = &axxia_i2c_recovery_info;
556 idev->adapter.quirks = (const struct i2c_adapter_quirks *)(&axxia_i2c_quirks);
557 idev->adapter.dev.parent = &(pdev->dev);
558 idev->adapter.dev.of_node = pdev->dev.of_node;
560 -platform_set_drvdata(pdev, (void *)idev)
{
219 -dev_set_drvdata(&(pdev->dev), data)
{
919 dev->driver_data = data;
920 return ;;
}
220 return ;;
}
562 ret = i2c_add_adapter(&(idev->adapter)) { /* Function call is skipped due to function is undefined */}
563 assume(ret != 0);
564 dev_err((const struct device *)(&(pdev->dev)), "failed to add adapter\n") { /* Function call is skipped due to function is undefined */}
565 return ret;;
}
1533 ldv_check_return_value(res_axxia_i2c_probe_16) { /* Function call is skipped due to function is undefined */}
1534 ldv_check_return_value_probe(res_axxia_i2c_probe_16) { /* Function call is skipped due to function is undefined */}
1535 assume(res_axxia_i2c_probe_16 != 0);
1536 goto ldv_module_exit;
1711 -ldv_check_final_state()
{
58 assume(!(ldv_counter_clk != 0));
60 assume(ldv_counter_i2c_clk_of_axxia_i2c_dev != 0);
60 -ldv_error()
{
15 LDV_ERROR:;
}
}
}
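In short: on the path above, axxia_i2c_probe() enables idev->i2c_clk (ldv_clk_prepare_enable_7 models clk_prepare_enable and sets the clock counter), i2c_add_adapter() then fails, and probe returns the error straight away, so the clock is still marked enabled when ldv_check_final_state() runs, which is the reported error. A minimal sketch of the failing tail of the probe function follows; it is reconstructed from the trace, example_probe_tail is a hypothetical name, the release call is assumed to be clk_disable_unprepare() as the usual counterpart of clk_prepare_enable(), and this is not a quote of the driver source or of any upstream patch.

#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>

/* Tail of the probe path as traced above. The early return on the
 * i2c_add_adapter() failure is where the enabled clock is leaked; the
 * clk_disable_unprepare() call marked below is the cleanup the traced
 * path never performs. */
static int example_probe_tail(struct platform_device *pdev,
                              struct axxia_i2c_dev *idev)
{
        int ret;

        clk_prepare_enable(idev->i2c_clk);      /* counter becomes 1; return value
                                                   is not checked on the traced path */

        i2c_set_adapdata(&idev->adapter, idev);
        platform_set_drvdata(pdev, idev);

        ret = i2c_add_adapter(&idev->adapter);
        if (ret) {
                dev_err(&pdev->dev, "failed to add adapter\n");
                clk_disable_unprepare(idev->i2c_clk);   /* missing in the traced code */
                return ret;
        }
        return 0;
}

Any early return taken after the clk_prepare_enable() call would trip the same final-state check, so the same release is needed on every error path that follows it.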
Source code
1 #ifndef _ASM_X86_IO_H 2 #define _ASM_X86_IO_H 3 4 /* 5 * This file contains the definitions for the x86 IO instructions 6 * inb/inw/inl/outb/outw/outl and the "string versions" of the same 7 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" 8 * versions of the single-IO instructions (inb_p/inw_p/..). 9 * 10 * This file is not meant to be obfuscating: it's just complicated 11 * to (a) handle it all in a way that makes gcc able to optimize it 12 * as well as possible and (b) trying to avoid writing the same thing 13 * over and over again with slight variations and possibly making a 14 * mistake somewhere. 15 */ 16 17 /* 18 * Thanks to James van Artsdalen for a better timing-fix than 19 * the two short jumps: using outb's to a nonexistent port seems 20 * to guarantee better timings even on fast machines. 21 * 22 * On the other hand, I'd like to be sure of a non-existent port: 23 * I feel a bit unsafe about using 0x80 (should be safe, though) 24 * 25 * Linus 26 */ 27 28 /* 29 * Bit simplified and optimized by Jan Hubicka 30 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. 31 * 32 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, 33 * isa_read[wl] and isa_write[wl] fixed 34 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> 35 */ 36 37 #define ARCH_HAS_IOREMAP_WC 38 #define ARCH_HAS_IOREMAP_WT 39 40 #include <linux/string.h> 41 #include <linux/compiler.h> 42 #include <asm/page.h> 43 #include <asm/early_ioremap.h> 44 #include <asm/pgtable_types.h> 45 46 #define build_mmio_read(name, size, type, reg, barrier) \ 47 static inline type name(const volatile void __iomem *addr) \ 48 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \ 49 :"m" (*(volatile type __force *)addr) barrier); return ret; } 50 51 #define build_mmio_write(name, size, type, reg, barrier) \ 52 static inline void name(type val, volatile void __iomem *addr) \ 53 { asm volatile("mov" size " %0,%1": :reg (val), \ 54 "m" (*(volatile type __force *)addr) barrier); } 55 56 build_mmio_read(readb, "b", unsigned char, "=q", :"memory") 57 build_mmio_read(readw, "w", unsigned short, "=r", :"memory") 58 build_mmio_read(readl, "l", unsigned int, "=r", :"memory") 59 60 build_mmio_read(__readb, "b", unsigned char, "=q", ) 61 build_mmio_read(__readw, "w", unsigned short, "=r", ) 62 build_mmio_read(__readl, "l", unsigned int, "=r", ) 63 64 build_mmio_write(writeb, "b", unsigned char, "q", :"memory") 65 build_mmio_write(writew, "w", unsigned short, "r", :"memory") 66 build_mmio_write(writel, "l", unsigned int, "r", :"memory") 67 68 build_mmio_write(__writeb, "b", unsigned char, "q", ) 69 build_mmio_write(__writew, "w", unsigned short, "r", ) 70 build_mmio_write(__writel, "l", unsigned int, "r", ) 71 72 #define readb_relaxed(a) __readb(a) 73 #define readw_relaxed(a) __readw(a) 74 #define readl_relaxed(a) __readl(a) 75 #define __raw_readb __readb 76 #define __raw_readw __readw 77 #define __raw_readl __readl 78 79 #define writeb_relaxed(v, a) __writeb(v, a) 80 #define writew_relaxed(v, a) __writew(v, a) 81 #define writel_relaxed(v, a) __writel(v, a) 82 #define __raw_writeb __writeb 83 #define __raw_writew __writew 84 #define __raw_writel __writel 85 86 #define mmiowb() barrier() 87 88 #ifdef CONFIG_X86_64 89 90 build_mmio_read(readq, "q", unsigned long, "=r", :"memory") 91 build_mmio_write(writeq, "q", unsigned long, "r", :"memory") 92 93 #define readq_relaxed(a) readq(a) 94 #define writeq_relaxed(v, a) writeq(v, a) 95 96 #define __raw_readq(a) readq(a) 97 #define __raw_writeq(val, addr) writeq(val, 
addr) 98 99 /* Let people know that we have them */ 100 #define readq readq 101 #define writeq writeq 102 103 #endif 104 105 /** 106 * virt_to_phys - map virtual addresses to physical 107 * @address: address to remap 108 * 109 * The returned physical address is the physical (CPU) mapping for 110 * the memory address given. It is only valid to use this function on 111 * addresses directly mapped or allocated via kmalloc. 112 * 113 * This function does not give bus mappings for DMA transfers. In 114 * almost all conceivable cases a device driver should not be using 115 * this function 116 */ 117 118 static inline phys_addr_t virt_to_phys(volatile void *address) 119 { 120 return __pa(address); 121 } 122 123 /** 124 * phys_to_virt - map physical address to virtual 125 * @address: address to remap 126 * 127 * The returned virtual address is a current CPU mapping for 128 * the memory address given. It is only valid to use this function on 129 * addresses that have a kernel mapping 130 * 131 * This function does not handle bus mappings for DMA transfers. In 132 * almost all conceivable cases a device driver should not be using 133 * this function 134 */ 135 136 static inline void *phys_to_virt(phys_addr_t address) 137 { 138 return __va(address); 139 } 140 141 /* 142 * Change "struct page" to physical address. 143 */ 144 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) 145 146 /* 147 * ISA I/O bus memory addresses are 1:1 with the physical address. 148 * However, we truncate the address to unsigned int to avoid undesirable 149 * promitions in legacy drivers. 150 */ 151 static inline unsigned int isa_virt_to_bus(volatile void *address) 152 { 153 return (unsigned int)virt_to_phys(address); 154 } 155 #define isa_page_to_bus(page) ((unsigned int)page_to_phys(page)) 156 #define isa_bus_to_virt phys_to_virt 157 158 /* 159 * However PCI ones are not necessarily 1:1 and therefore these interfaces 160 * are forbidden in portable PCI drivers. 161 * 162 * Allow them on x86 for legacy drivers, though. 163 */ 164 #define virt_to_bus virt_to_phys 165 #define bus_to_virt phys_to_virt 166 167 /** 168 * ioremap - map bus memory into CPU space 169 * @offset: bus address of the memory 170 * @size: size of the resource to map 171 * 172 * ioremap performs a platform specific sequence of operations to 173 * make bus memory CPU accessible via the readb/readw/readl/writeb/ 174 * writew/writel functions and the other mmio helpers. The returned 175 * address is not guaranteed to be usable directly as a virtual 176 * address. 177 * 178 * If the area you are trying to map is a PCI BAR you should have a 179 * look at pci_iomap(). 
180 */ 181 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); 182 extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size); 183 #define ioremap_uc ioremap_uc 184 185 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); 186 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, 187 unsigned long prot_val); 188 189 /* 190 * The default ioremap() behavior is non-cached: 191 */ 192 static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) 193 { 194 return ioremap_nocache(offset, size); 195 } 196 197 extern void iounmap(volatile void __iomem *addr); 198 199 extern void set_iounmap_nonlazy(void); 200 201 #ifdef __KERNEL__ 202 203 #include <asm-generic/iomap.h> 204 205 /* 206 * Convert a virtual cached pointer to an uncached pointer 207 */ 208 #define xlate_dev_kmem_ptr(p) p 209 210 static inline void 211 memset_io(volatile void __iomem *addr, unsigned char val, size_t count) 212 { 213 memset((void __force *)addr, val, count); 214 } 215 216 static inline void 217 memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count) 218 { 219 memcpy(dst, (const void __force *)src, count); 220 } 221 222 static inline void 223 memcpy_toio(volatile void __iomem *dst, const void *src, size_t count) 224 { 225 memcpy((void __force *)dst, src, count); 226 } 227 228 /* 229 * ISA space is 'always mapped' on a typical x86 system, no need to 230 * explicitly ioremap() it. The fact that the ISA IO space is mapped 231 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values 232 * are physical addresses. The following constant pointer can be 233 * used as the IO-area pointer (it can be iounmapped as well, so the 234 * analogy with PCI is quite large): 235 */ 236 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) 237 238 /* 239 * Cache management 240 * 241 * This needed for two cases 242 * 1. Out of order aware processors 243 * 2. 
Accidentally out of order processors (PPro errata #51) 244 */ 245 246 static inline void flush_write_buffers(void) 247 { 248 #if defined(CONFIG_X86_PPRO_FENCE) 249 asm volatile("lock; addl $0,0(%%esp)": : :"memory"); 250 #endif 251 } 252 253 #endif /* __KERNEL__ */ 254 255 extern void native_io_delay(void); 256 257 extern int io_delay_type; 258 extern void io_delay_init(void); 259 260 #if defined(CONFIG_PARAVIRT) 261 #include <asm/paravirt.h> 262 #else 263 264 static inline void slow_down_io(void) 265 { 266 native_io_delay(); 267 #ifdef REALLY_SLOW_IO 268 native_io_delay(); 269 native_io_delay(); 270 native_io_delay(); 271 #endif 272 } 273 274 #endif 275 276 #define BUILDIO(bwl, bw, type) \ 277 static inline void out##bwl(unsigned type value, int port) \ 278 { \ 279 asm volatile("out" #bwl " %" #bw "0, %w1" \ 280 : : "a"(value), "Nd"(port)); \ 281 } \ 282 \ 283 static inline unsigned type in##bwl(int port) \ 284 { \ 285 unsigned type value; \ 286 asm volatile("in" #bwl " %w1, %" #bw "0" \ 287 : "=a"(value) : "Nd"(port)); \ 288 return value; \ 289 } \ 290 \ 291 static inline void out##bwl##_p(unsigned type value, int port) \ 292 { \ 293 out##bwl(value, port); \ 294 slow_down_io(); \ 295 } \ 296 \ 297 static inline unsigned type in##bwl##_p(int port) \ 298 { \ 299 unsigned type value = in##bwl(port); \ 300 slow_down_io(); \ 301 return value; \ 302 } \ 303 \ 304 static inline void outs##bwl(int port, const void *addr, unsigned long count) \ 305 { \ 306 asm volatile("rep; outs" #bwl \ 307 : "+S"(addr), "+c"(count) : "d"(port)); \ 308 } \ 309 \ 310 static inline void ins##bwl(int port, void *addr, unsigned long count) \ 311 { \ 312 asm volatile("rep; ins" #bwl \ 313 : "+D"(addr), "+c"(count) : "d"(port)); \ 314 } 315 316 BUILDIO(b, b, char) 317 BUILDIO(w, w, short) 318 BUILDIO(l, , int) 319 320 extern void *xlate_dev_mem_ptr(phys_addr_t phys); 321 extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); 322 323 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, 324 enum page_cache_mode pcm); 325 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size); 326 extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size); 327 328 extern bool is_early_ioremap_ptep(pte_t *ptep); 329 330 #ifdef CONFIG_XEN 331 #include <xen/xen.h> 332 struct bio_vec; 333 334 extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, 335 const struct bio_vec *vec2); 336 337 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ 338 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \ 339 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2))) 340 #endif /* CONFIG_XEN */ 341 342 #define IO_SPACE_LIMIT 0xffff 343 344 #ifdef CONFIG_MTRR 345 extern int __must_check arch_phys_wc_index(int handle); 346 #define arch_phys_wc_index arch_phys_wc_index 347 348 extern int __must_check arch_phys_wc_add(unsigned long base, 349 unsigned long size); 350 extern void arch_phys_wc_del(int handle); 351 #define arch_phys_wc_add arch_phys_wc_add 352 #endif 353 354 #endif /* _ASM_X86_IO_H */
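For readability of the header listed above, the following stand-alone sketch shows roughly what the build_mmio_read()/build_mmio_write() and BUILDIO() macros expand to (GCC inline assembly on x86). The __iomem and __force markers are Sparse annotations; they are defined away here so the snippet compiles on its own. This is an illustration only, not part of the verified sources.

/* Stand-in for the kernel's Sparse annotations (empty in a normal build). */
#define __iomem
#define __force

/* Expansion of build_mmio_read(readl, "l", unsigned int, "=r", :"memory") */
static inline unsigned int readl(const volatile void __iomem *addr)
{
	unsigned int ret;
	asm volatile("movl %1,%0" : "=r" (ret)
		     : "m" (*(volatile unsigned int __force *)addr) : "memory");
	return ret;
}

/* Expansion of build_mmio_write(writel, "l", unsigned int, "r", :"memory") */
static inline void writel(unsigned int val, volatile void __iomem *addr)
{
	asm volatile("movl %0,%1" : : "r" (val),
		     "m" (*(volatile unsigned int __force *)addr) : "memory");
}

/* Expansion of BUILDIO(b, b, char): byte-wide port I/O accessors */
static inline void outb(unsigned char value, int port)
{
	asm volatile("outb %b0, %w1" : : "a"(value), "Nd"(port));
}

static inline unsigned char inb(int port)
{
	unsigned char value;

	asm volatile("inb %w1, %b0" : "=a"(value) : "Nd"(port));
	return value;
}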
1 2 /* 3 * This driver implements I2C master functionality using the LSI API2C 4 * controller. 5 * 6 * NOTE: The controller has a limitation in that it can only do transfers of 7 * maximum 255 bytes at a time. If a larger transfer is attempted, error code 8 * (-EINVAL) is returned. 9 * 10 * This software is licensed under the terms of the GNU General Public 11 * License version 2, as published by the Free Software Foundation, and 12 * may be copied, distributed, and modified under those terms. 13 */ 14 #include <linux/clk.h> 15 #include <linux/clkdev.h> 16 #include <linux/err.h> 17 #include <linux/i2c.h> 18 #include <linux/init.h> 19 #include <linux/interrupt.h> 20 #include <linux/module.h> 21 #include <linux/io.h> 22 #include <linux/kernel.h> 23 #include <linux/platform_device.h> 24 25 #define SCL_WAIT_TIMEOUT_NS 25000000 26 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 27 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 28 #define FIFO_SIZE 8 29 30 #define GLOBAL_CONTROL 0x00 31 #define GLOBAL_MST_EN BIT(0) 32 #define GLOBAL_SLV_EN BIT(1) 33 #define GLOBAL_IBML_EN BIT(2) 34 #define INTERRUPT_STATUS 0x04 35 #define INTERRUPT_ENABLE 0x08 36 #define INT_SLV BIT(1) 37 #define INT_MST BIT(0) 38 #define WAIT_TIMER_CONTROL 0x0c 39 #define WT_EN BIT(15) 40 #define WT_VALUE(_x) ((_x) & 0x7fff) 41 #define IBML_TIMEOUT 0x10 42 #define IBML_LOW_MEXT 0x14 43 #define IBML_LOW_SEXT 0x18 44 #define TIMER_CLOCK_DIV 0x1c 45 #define I2C_BUS_MONITOR 0x20 46 #define BM_SDAC BIT(3) 47 #define BM_SCLC BIT(2) 48 #define BM_SDAS BIT(1) 49 #define BM_SCLS BIT(0) 50 #define SOFT_RESET 0x24 51 #define MST_COMMAND 0x28 52 #define CMD_BUSY (1<<3) 53 #define CMD_MANUAL (0x00 | CMD_BUSY) 54 #define CMD_AUTO (0x01 | CMD_BUSY) 55 #define MST_RX_XFER 0x2c 56 #define MST_TX_XFER 0x30 57 #define MST_ADDR_1 0x34 58 #define MST_ADDR_2 0x38 59 #define MST_DATA 0x3c 60 #define MST_TX_FIFO 0x40 61 #define MST_RX_FIFO 0x44 62 #define MST_INT_ENABLE 0x48 63 #define MST_INT_STATUS 0x4c 64 #define MST_STATUS_RFL (1 << 13) /* RX FIFO serivce */ 65 #define MST_STATUS_TFL (1 << 12) /* TX FIFO service */ 66 #define MST_STATUS_SNS (1 << 11) /* Manual mode done */ 67 #define MST_STATUS_SS (1 << 10) /* Automatic mode done */ 68 #define MST_STATUS_SCC (1 << 9) /* Stop complete */ 69 #define MST_STATUS_IP (1 << 8) /* Invalid parameter */ 70 #define MST_STATUS_TSS (1 << 7) /* Timeout */ 71 #define MST_STATUS_AL (1 << 6) /* Arbitration lost */ 72 #define MST_STATUS_ND (1 << 5) /* NAK on data phase */ 73 #define MST_STATUS_NA (1 << 4) /* NAK on address phase */ 74 #define MST_STATUS_NAK (MST_STATUS_NA | \ 75 MST_STATUS_ND) 76 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 77 MST_STATUS_AL | \ 78 MST_STATUS_IP | \ 79 MST_STATUS_TSS) 80 #define MST_TX_BYTES_XFRD 0x50 81 #define MST_RX_BYTES_XFRD 0x54 82 #define SCL_HIGH_PERIOD 0x80 83 #define SCL_LOW_PERIOD 0x84 84 #define SPIKE_FLTR_LEN 0x88 85 #define SDA_SETUP_TIME 0x8c 86 #define SDA_HOLD_TIME 0x90 87 88 /** 89 * axxia_i2c_dev - I2C device context 90 * @base: pointer to register struct 91 * @msg: pointer to current message 92 * @msg_xfrd: number of bytes transferred in msg 93 * @msg_err: error code for completed message 94 * @msg_complete: xfer completion object 95 * @dev: device reference 96 * @adapter: core i2c abstraction 97 * @i2c_clk: clock reference for i2c input clock 98 * @bus_clk_rate: current i2c bus clock rate 99 */ 100 struct axxia_i2c_dev { 101 void __iomem *base; 102 struct i2c_msg *msg; 103 size_t msg_xfrd; 104 int msg_err; 105 struct completion msg_complete; 106 struct device 
*dev; 107 struct i2c_adapter adapter; 108 struct clk *i2c_clk; 109 u32 bus_clk_rate; 110 }; 111 112 static void i2c_int_disable(struct axxia_i2c_dev *idev, u32 mask) 113 { 114 u32 int_en; 115 116 int_en = readl(idev->base + MST_INT_ENABLE); 117 writel(int_en & ~mask, idev->base + MST_INT_ENABLE); 118 } 119 120 static void i2c_int_enable(struct axxia_i2c_dev *idev, u32 mask) 121 { 122 u32 int_en; 123 124 int_en = readl(idev->base + MST_INT_ENABLE); 125 writel(int_en | mask, idev->base + MST_INT_ENABLE); 126 } 127 128 /** 129 * ns_to_clk - Convert time (ns) to clock cycles for the given clock frequency. 130 */ 131 static u32 ns_to_clk(u64 ns, u32 clk_mhz) 132 { 133 return div_u64(ns * clk_mhz, 1000); 134 } 135 136 static int axxia_i2c_init(struct axxia_i2c_dev *idev) 137 { 138 u32 divisor = clk_get_rate(idev->i2c_clk) / idev->bus_clk_rate; 139 u32 clk_mhz = clk_get_rate(idev->i2c_clk) / 1000000; 140 u32 t_setup; 141 u32 t_high, t_low; 142 u32 tmo_clk; 143 u32 prescale; 144 unsigned long timeout; 145 146 dev_dbg(idev->dev, "rate=%uHz per_clk=%uMHz -> ratio=1:%u\n", 147 idev->bus_clk_rate, clk_mhz, divisor); 148 149 /* Reset controller */ 150 writel(0x01, idev->base + SOFT_RESET); 151 timeout = jiffies + msecs_to_jiffies(100); 152 while (readl(idev->base + SOFT_RESET) & 1) { 153 if (time_after(jiffies, timeout)) { 154 dev_warn(idev->dev, "Soft reset failed\n"); 155 break; 156 } 157 } 158 159 /* Enable Master Mode */ 160 writel(0x1, idev->base + GLOBAL_CONTROL); 161 162 if (idev->bus_clk_rate <= 100000) { 163 /* Standard mode SCL 50/50, tSU:DAT = 250 ns */ 164 t_high = divisor * 1 / 2; 165 t_low = divisor * 1 / 2; 166 t_setup = ns_to_clk(250, clk_mhz); 167 } else { 168 /* Fast mode SCL 33/66, tSU:DAT = 100 ns */ 169 t_high = divisor * 1 / 3; 170 t_low = divisor * 2 / 3; 171 t_setup = ns_to_clk(100, clk_mhz); 172 } 173 174 /* SCL High Time */ 175 writel(t_high, idev->base + SCL_HIGH_PERIOD); 176 /* SCL Low Time */ 177 writel(t_low, idev->base + SCL_LOW_PERIOD); 178 /* SDA Setup Time */ 179 writel(t_setup, idev->base + SDA_SETUP_TIME); 180 /* SDA Hold Time, 300ns */ 181 writel(ns_to_clk(300, clk_mhz), idev->base + SDA_HOLD_TIME); 182 /* Filter <50ns spikes */ 183 writel(ns_to_clk(50, clk_mhz), idev->base + SPIKE_FLTR_LEN); 184 185 /* Configure Time-Out Registers */ 186 tmo_clk = ns_to_clk(SCL_WAIT_TIMEOUT_NS, clk_mhz); 187 188 /* Find prescaler value that makes tmo_clk fit in 15-bits counter. */ 189 for (prescale = 0; prescale < 15; ++prescale) { 190 if (tmo_clk <= 0x7fff) 191 break; 192 tmo_clk >>= 1; 193 } 194 if (tmo_clk > 0x7fff) 195 tmo_clk = 0x7fff; 196 197 /* Prescale divider (log2) */ 198 writel(prescale, idev->base + TIMER_CLOCK_DIV); 199 /* Timeout in divided clocks */ 200 writel(WT_EN | WT_VALUE(tmo_clk), idev->base + WAIT_TIMER_CONTROL); 201 202 /* Mask all master interrupt bits */ 203 i2c_int_disable(idev, ~0); 204 205 /* Interrupt enable */ 206 writel(0x01, idev->base + INTERRUPT_ENABLE); 207 208 return 0; 209 } 210 211 static int i2c_m_rd(const struct i2c_msg *msg) 212 { 213 return (msg->flags & I2C_M_RD) != 0; 214 } 215 216 static int i2c_m_ten(const struct i2c_msg *msg) 217 { 218 return (msg->flags & I2C_M_TEN) != 0; 219 } 220 221 static int i2c_m_recv_len(const struct i2c_msg *msg) 222 { 223 return (msg->flags & I2C_M_RECV_LEN) != 0; 224 } 225 226 /** 227 * axxia_i2c_empty_rx_fifo - Fetch data from RX FIFO and update SMBus block 228 * transfer length if this is the first byte of such a transfer. 
229 */ 230 static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev) 231 { 232 struct i2c_msg *msg = idev->msg; 233 size_t rx_fifo_avail = readl(idev->base + MST_RX_FIFO); 234 int bytes_to_transfer = min(rx_fifo_avail, msg->len - idev->msg_xfrd); 235 236 while (bytes_to_transfer-- > 0) { 237 int c = readl(idev->base + MST_DATA); 238 239 if (idev->msg_xfrd == 0 && i2c_m_recv_len(msg)) { 240 /* 241 * Check length byte for SMBus block read 242 */ 243 if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) { 244 idev->msg_err = -EPROTO; 245 i2c_int_disable(idev, ~0); 246 complete(&idev->msg_complete); 247 break; 248 } 249 msg->len = 1 + c; 250 writel(msg->len, idev->base + MST_RX_XFER); 251 } 252 msg->buf[idev->msg_xfrd++] = c; 253 } 254 255 return 0; 256 } 257 258 /** 259 * axxia_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer. 260 * @return: Number of bytes left to transfer. 261 */ 262 static int axxia_i2c_fill_tx_fifo(struct axxia_i2c_dev *idev) 263 { 264 struct i2c_msg *msg = idev->msg; 265 size_t tx_fifo_avail = FIFO_SIZE - readl(idev->base + MST_TX_FIFO); 266 int bytes_to_transfer = min(tx_fifo_avail, msg->len - idev->msg_xfrd); 267 int ret = msg->len - idev->msg_xfrd - bytes_to_transfer; 268 269 while (bytes_to_transfer-- > 0) 270 writel(msg->buf[idev->msg_xfrd++], idev->base + MST_DATA); 271 272 return ret; 273 } 274 275 static irqreturn_t axxia_i2c_isr(int irq, void *_dev) 276 { 277 struct axxia_i2c_dev *idev = _dev; 278 u32 status; 279 280 if (!(readl(idev->base + INTERRUPT_STATUS) & INT_MST)) 281 return IRQ_NONE; 282 283 /* Read interrupt status bits */ 284 status = readl(idev->base + MST_INT_STATUS); 285 286 if (!idev->msg) { 287 dev_warn(idev->dev, "unexpected interrupt\n"); 288 goto out; 289 } 290 291 /* RX FIFO needs service? */ 292 if (i2c_m_rd(idev->msg) && (status & MST_STATUS_RFL)) 293 axxia_i2c_empty_rx_fifo(idev); 294 295 /* TX FIFO needs service? 
*/ 296 if (!i2c_m_rd(idev->msg) && (status & MST_STATUS_TFL)) { 297 if (axxia_i2c_fill_tx_fifo(idev) == 0) 298 i2c_int_disable(idev, MST_STATUS_TFL); 299 } 300 301 if (status & MST_STATUS_SCC) { 302 /* Stop completed */ 303 i2c_int_disable(idev, ~0); 304 complete(&idev->msg_complete); 305 } else if (status & MST_STATUS_SNS) { 306 /* Transfer done */ 307 i2c_int_disable(idev, ~0); 308 if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len) 309 axxia_i2c_empty_rx_fifo(idev); 310 complete(&idev->msg_complete); 311 } else if (unlikely(status & MST_STATUS_ERR)) { 312 /* Transfer error */ 313 i2c_int_disable(idev, ~0); 314 if (status & MST_STATUS_AL) 315 idev->msg_err = -EAGAIN; 316 else if (status & MST_STATUS_NAK) 317 idev->msg_err = -ENXIO; 318 else 319 idev->msg_err = -EIO; 320 dev_dbg(idev->dev, "error %#x, addr=%#x rx=%u/%u tx=%u/%u\n", 321 status, 322 idev->msg->addr, 323 readl(idev->base + MST_RX_BYTES_XFRD), 324 readl(idev->base + MST_RX_XFER), 325 readl(idev->base + MST_TX_BYTES_XFRD), 326 readl(idev->base + MST_TX_XFER)); 327 complete(&idev->msg_complete); 328 } 329 330 out: 331 /* Clear interrupt */ 332 writel(INT_MST, idev->base + INTERRUPT_STATUS); 333 334 return IRQ_HANDLED; 335 } 336 337 static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) 338 { 339 u32 int_mask = MST_STATUS_ERR | MST_STATUS_SNS; 340 u32 rx_xfer, tx_xfer; 341 u32 addr_1, addr_2; 342 unsigned long time_left; 343 344 idev->msg = msg; 345 idev->msg_xfrd = 0; 346 idev->msg_err = 0; 347 reinit_completion(&idev->msg_complete); 348 349 if (i2c_m_ten(msg)) { 350 /* 10-bit address 351 * addr_1: 5'b11110 | addr[9:8] | (R/nW) 352 * addr_2: addr[7:0] 353 */ 354 addr_1 = 0xF0 | ((msg->addr >> 7) & 0x06); 355 addr_2 = msg->addr & 0xFF; 356 } else { 357 /* 7-bit address 358 * addr_1: addr[6:0] | (R/nW) 359 * addr_2: dont care 360 */ 361 addr_1 = (msg->addr << 1) & 0xFF; 362 addr_2 = 0; 363 } 364 365 if (i2c_m_rd(msg)) { 366 /* I2C read transfer */ 367 rx_xfer = i2c_m_recv_len(msg) ? 
I2C_SMBUS_BLOCK_MAX : msg->len; 368 tx_xfer = 0; 369 addr_1 |= 1; /* Set the R/nW bit of the address */ 370 } else { 371 /* I2C write transfer */ 372 rx_xfer = 0; 373 tx_xfer = msg->len; 374 } 375 376 writel(rx_xfer, idev->base + MST_RX_XFER); 377 writel(tx_xfer, idev->base + MST_TX_XFER); 378 writel(addr_1, idev->base + MST_ADDR_1); 379 writel(addr_2, idev->base + MST_ADDR_2); 380 381 if (i2c_m_rd(msg)) 382 int_mask |= MST_STATUS_RFL; 383 else if (axxia_i2c_fill_tx_fifo(idev) != 0) 384 int_mask |= MST_STATUS_TFL; 385 386 /* Start manual mode */ 387 writel(CMD_MANUAL, idev->base + MST_COMMAND); 388 389 i2c_int_enable(idev, int_mask); 390 391 time_left = wait_for_completion_timeout(&idev->msg_complete, 392 I2C_XFER_TIMEOUT); 393 394 i2c_int_disable(idev, int_mask); 395 396 if (readl(idev->base + MST_COMMAND) & CMD_BUSY) 397 dev_warn(idev->dev, "busy after xfer\n"); 398 399 if (time_left == 0) 400 idev->msg_err = -ETIMEDOUT; 401 402 if (idev->msg_err == -ETIMEDOUT) 403 i2c_recover_bus(&idev->adapter); 404 405 if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO) 406 axxia_i2c_init(idev); 407 408 return idev->msg_err; 409 } 410 411 static int axxia_i2c_stop(struct axxia_i2c_dev *idev) 412 { 413 u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC; 414 unsigned long time_left; 415 416 reinit_completion(&idev->msg_complete); 417 418 /* Issue stop */ 419 writel(0xb, idev->base + MST_COMMAND); 420 i2c_int_enable(idev, int_mask); 421 time_left = wait_for_completion_timeout(&idev->msg_complete, 422 I2C_STOP_TIMEOUT); 423 i2c_int_disable(idev, int_mask); 424 if (time_left == 0) 425 return -ETIMEDOUT; 426 427 if (readl(idev->base + MST_COMMAND) & CMD_BUSY) 428 dev_warn(idev->dev, "busy after stop\n"); 429 430 return 0; 431 } 432 433 static int 434 axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) 435 { 436 struct axxia_i2c_dev *idev = i2c_get_adapdata(adap); 437 int i; 438 int ret = 0; 439 440 for (i = 0; ret == 0 && i < num; ++i) 441 ret = axxia_i2c_xfer_msg(idev, &msgs[i]); 442 443 axxia_i2c_stop(idev); 444 445 return ret ? 
: i; 446 } 447 448 static int axxia_i2c_get_scl(struct i2c_adapter *adap) 449 { 450 struct axxia_i2c_dev *idev = i2c_get_adapdata(adap); 451 452 return !!(readl(idev->base + I2C_BUS_MONITOR) & BM_SCLS); 453 } 454 455 static void axxia_i2c_set_scl(struct i2c_adapter *adap, int val) 456 { 457 struct axxia_i2c_dev *idev = i2c_get_adapdata(adap); 458 u32 tmp; 459 460 /* Preserve SDA Control */ 461 tmp = readl(idev->base + I2C_BUS_MONITOR) & BM_SDAC; 462 if (!val) 463 tmp |= BM_SCLC; 464 writel(tmp, idev->base + I2C_BUS_MONITOR); 465 } 466 467 static int axxia_i2c_get_sda(struct i2c_adapter *adap) 468 { 469 struct axxia_i2c_dev *idev = i2c_get_adapdata(adap); 470 471 return !!(readl(idev->base + I2C_BUS_MONITOR) & BM_SDAS); 472 } 473 474 static struct i2c_bus_recovery_info axxia_i2c_recovery_info = { 475 .recover_bus = i2c_generic_scl_recovery, 476 .get_scl = axxia_i2c_get_scl, 477 .set_scl = axxia_i2c_set_scl, 478 .get_sda = axxia_i2c_get_sda, 479 }; 480 481 static u32 axxia_i2c_func(struct i2c_adapter *adap) 482 { 483 u32 caps = (I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | 484 I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA); 485 return caps; 486 } 487 488 static const struct i2c_algorithm axxia_i2c_algo = { 489 .master_xfer = axxia_i2c_xfer, 490 .functionality = axxia_i2c_func, 491 }; 492 493 static struct i2c_adapter_quirks axxia_i2c_quirks = { 494 .max_read_len = 255, 495 .max_write_len = 255, 496 }; 497 498 static int axxia_i2c_probe(struct platform_device *pdev) 499 { 500 struct device_node *np = pdev->dev.of_node; 501 struct axxia_i2c_dev *idev = NULL; 502 struct resource *res; 503 void __iomem *base; 504 int irq; 505 int ret = 0; 506 507 idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); 508 if (!idev) 509 return -ENOMEM; 510 511 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 512 base = devm_ioremap_resource(&pdev->dev, res); 513 if (IS_ERR(base)) 514 return PTR_ERR(base); 515 516 irq = platform_get_irq(pdev, 0); 517 if (irq < 0) { 518 dev_err(&pdev->dev, "missing interrupt resource\n"); 519 return irq; 520 } 521 522 idev->i2c_clk = devm_clk_get(&pdev->dev, "i2c"); 523 if (IS_ERR(idev->i2c_clk)) { 524 dev_err(&pdev->dev, "missing clock\n"); 525 return PTR_ERR(idev->i2c_clk); 526 } 527 528 idev->base = base; 529 idev->dev = &pdev->dev; 530 init_completion(&idev->msg_complete); 531 532 of_property_read_u32(np, "clock-frequency", &idev->bus_clk_rate); 533 if (idev->bus_clk_rate == 0) 534 idev->bus_clk_rate = 100000; /* default clock rate */ 535 536 ret = axxia_i2c_init(idev); 537 if (ret) { 538 dev_err(&pdev->dev, "failed to initialize\n"); 539 return ret; 540 } 541 542 ret = devm_request_irq(&pdev->dev, irq, axxia_i2c_isr, 0, 543 pdev->name, idev); 544 if (ret) { 545 dev_err(&pdev->dev, "failed to claim IRQ%d\n", irq); 546 return ret; 547 } 548 549 clk_prepare_enable(idev->i2c_clk); 550 551 i2c_set_adapdata(&idev->adapter, idev); 552 strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name)); 553 idev->adapter.owner = THIS_MODULE; 554 idev->adapter.algo = &axxia_i2c_algo; 555 idev->adapter.bus_recovery_info = &axxia_i2c_recovery_info; 556 idev->adapter.quirks = &axxia_i2c_quirks; 557 idev->adapter.dev.parent = &pdev->dev; 558 idev->adapter.dev.of_node = pdev->dev.of_node; 559 560 platform_set_drvdata(pdev, idev); 561 562 ret = i2c_add_adapter(&idev->adapter); 563 if (ret) { 564 dev_err(&pdev->dev, "failed to add adapter\n"); 565 return ret; 566 } 567 568 return 0; 569 } 570 571 static int axxia_i2c_remove(struct platform_device *pdev) 572 { 573 struct axxia_i2c_dev 
*idev = platform_get_drvdata(pdev); 574 575 clk_disable_unprepare(idev->i2c_clk); 576 i2c_del_adapter(&idev->adapter); 577 578 return 0; 579 } 580 581 /* Match table for of_platform binding */ 582 static const struct of_device_id axxia_i2c_of_match[] = { 583 { .compatible = "lsi,api2c", }, 584 {}, 585 }; 586 587 MODULE_DEVICE_TABLE(of, axxia_i2c_of_match); 588 589 static struct platform_driver axxia_i2c_driver = { 590 .probe = axxia_i2c_probe, 591 .remove = axxia_i2c_remove, 592 .driver = { 593 .name = "axxia-i2c", 594 .of_match_table = axxia_i2c_of_match, 595 }, 596 }; 597 598 module_platform_driver(axxia_i2c_driver); 599 600 MODULE_DESCRIPTION("Axxia I2C Bus driver"); 601 MODULE_AUTHOR("Anders Berg <anders.berg@lsi.com>"); 602 MODULE_LICENSE("GPL v2"); 603 604 605 606 607 608 /* LDV_COMMENT_BEGIN_MAIN */ 609 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful 610 611 /*###########################################################################*/ 612 613 /*############## Driver Environment Generator 0.2 output ####################*/ 614 615 /*###########################################################################*/ 616 617 618 619 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test that all kernel resources are correctly released by the driver before it is unloaded. */ 620 void ldv_check_final_state(void); 621 622 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */ 623 void ldv_check_return_value(int res); 624 625 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */ 626 void ldv_check_return_value_probe(int res); 627 628 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 629 void ldv_initialize(void); 630 631 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 632 void ldv_handler_precall(void); 633 634 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */ 635 int nondet_int(void); 636 637 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */ 638 int LDV_IN_INTERRUPT; 639 640 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier.
*/ 641 void ldv_main0_sequence_infinite_withcheck_stateful(void) { 642 643 644 645 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 646 /*============================= VARIABLE DECLARATION PART =============================*/ 647 /** STRUCT: struct type: i2c_bus_recovery_info, struct name: axxia_i2c_recovery_info **/ 648 /* content: static int axxia_i2c_get_scl(struct i2c_adapter *adap)*/ 649 /* LDV_COMMENT_BEGIN_PREP */ 650 #define SCL_WAIT_TIMEOUT_NS 25000000 651 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 652 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 653 #define FIFO_SIZE 8 654 #define GLOBAL_CONTROL 0x00 655 #define GLOBAL_MST_EN BIT(0) 656 #define GLOBAL_SLV_EN BIT(1) 657 #define GLOBAL_IBML_EN BIT(2) 658 #define INTERRUPT_STATUS 0x04 659 #define INTERRUPT_ENABLE 0x08 660 #define INT_SLV BIT(1) 661 #define INT_MST BIT(0) 662 #define WAIT_TIMER_CONTROL 0x0c 663 #define WT_EN BIT(15) 664 #define WT_VALUE(_x) ((_x) & 0x7fff) 665 #define IBML_TIMEOUT 0x10 666 #define IBML_LOW_MEXT 0x14 667 #define IBML_LOW_SEXT 0x18 668 #define TIMER_CLOCK_DIV 0x1c 669 #define I2C_BUS_MONITOR 0x20 670 #define BM_SDAC BIT(3) 671 #define BM_SCLC BIT(2) 672 #define BM_SDAS BIT(1) 673 #define BM_SCLS BIT(0) 674 #define SOFT_RESET 0x24 675 #define MST_COMMAND 0x28 676 #define CMD_BUSY (1<<3) 677 #define CMD_MANUAL (0x00 | CMD_BUSY) 678 #define CMD_AUTO (0x01 | CMD_BUSY) 679 #define MST_RX_XFER 0x2c 680 #define MST_TX_XFER 0x30 681 #define MST_ADDR_1 0x34 682 #define MST_ADDR_2 0x38 683 #define MST_DATA 0x3c 684 #define MST_TX_FIFO 0x40 685 #define MST_RX_FIFO 0x44 686 #define MST_INT_ENABLE 0x48 687 #define MST_INT_STATUS 0x4c 688 #define MST_STATUS_RFL (1 << 13) 689 #define MST_STATUS_TFL (1 << 12) 690 #define MST_STATUS_SNS (1 << 11) 691 #define MST_STATUS_SS (1 << 10) 692 #define MST_STATUS_SCC (1 << 9) 693 #define MST_STATUS_IP (1 << 8) 694 #define MST_STATUS_TSS (1 << 7) 695 #define MST_STATUS_AL (1 << 6) 696 #define MST_STATUS_ND (1 << 5) 697 #define MST_STATUS_NA (1 << 4) 698 #define MST_STATUS_NAK (MST_STATUS_NA | \ 699 MST_STATUS_ND) 700 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 701 MST_STATUS_AL | \ 702 MST_STATUS_IP | \ 703 MST_STATUS_TSS) 704 #define MST_TX_BYTES_XFRD 0x50 705 #define MST_RX_BYTES_XFRD 0x54 706 #define SCL_HIGH_PERIOD 0x80 707 #define SCL_LOW_PERIOD 0x84 708 #define SPIKE_FLTR_LEN 0x88 709 #define SDA_SETUP_TIME 0x8c 710 #define SDA_HOLD_TIME 0x90 711 /* LDV_COMMENT_END_PREP */ 712 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "axxia_i2c_get_scl" */ 713 struct i2c_adapter * var_group1; 714 /* content: static void axxia_i2c_set_scl(struct i2c_adapter *adap, int val)*/ 715 /* LDV_COMMENT_BEGIN_PREP */ 716 #define SCL_WAIT_TIMEOUT_NS 25000000 717 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 718 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 719 #define FIFO_SIZE 8 720 #define GLOBAL_CONTROL 0x00 721 #define GLOBAL_MST_EN BIT(0) 722 #define GLOBAL_SLV_EN BIT(1) 723 #define GLOBAL_IBML_EN BIT(2) 724 #define INTERRUPT_STATUS 0x04 725 #define INTERRUPT_ENABLE 0x08 726 #define INT_SLV BIT(1) 727 #define INT_MST BIT(0) 728 #define WAIT_TIMER_CONTROL 0x0c 729 #define WT_EN BIT(15) 730 #define WT_VALUE(_x) ((_x) & 0x7fff) 731 #define IBML_TIMEOUT 0x10 732 #define IBML_LOW_MEXT 0x14 733 #define IBML_LOW_SEXT 0x18 734 #define TIMER_CLOCK_DIV 0x1c 735 #define I2C_BUS_MONITOR 0x20 736 #define BM_SDAC BIT(3) 737 #define BM_SCLC BIT(2) 738 #define BM_SDAS BIT(1) 739 #define BM_SCLS BIT(0) 740 #define SOFT_RESET 0x24 741 #define MST_COMMAND 0x28 742 #define 
CMD_BUSY (1<<3) 743 #define CMD_MANUAL (0x00 | CMD_BUSY) 744 #define CMD_AUTO (0x01 | CMD_BUSY) 745 #define MST_RX_XFER 0x2c 746 #define MST_TX_XFER 0x30 747 #define MST_ADDR_1 0x34 748 #define MST_ADDR_2 0x38 749 #define MST_DATA 0x3c 750 #define MST_TX_FIFO 0x40 751 #define MST_RX_FIFO 0x44 752 #define MST_INT_ENABLE 0x48 753 #define MST_INT_STATUS 0x4c 754 #define MST_STATUS_RFL (1 << 13) 755 #define MST_STATUS_TFL (1 << 12) 756 #define MST_STATUS_SNS (1 << 11) 757 #define MST_STATUS_SS (1 << 10) 758 #define MST_STATUS_SCC (1 << 9) 759 #define MST_STATUS_IP (1 << 8) 760 #define MST_STATUS_TSS (1 << 7) 761 #define MST_STATUS_AL (1 << 6) 762 #define MST_STATUS_ND (1 << 5) 763 #define MST_STATUS_NA (1 << 4) 764 #define MST_STATUS_NAK (MST_STATUS_NA | \ 765 MST_STATUS_ND) 766 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 767 MST_STATUS_AL | \ 768 MST_STATUS_IP | \ 769 MST_STATUS_TSS) 770 #define MST_TX_BYTES_XFRD 0x50 771 #define MST_RX_BYTES_XFRD 0x54 772 #define SCL_HIGH_PERIOD 0x80 773 #define SCL_LOW_PERIOD 0x84 774 #define SPIKE_FLTR_LEN 0x88 775 #define SDA_SETUP_TIME 0x8c 776 #define SDA_HOLD_TIME 0x90 777 /* LDV_COMMENT_END_PREP */ 778 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "axxia_i2c_set_scl" */ 779 int var_axxia_i2c_set_scl_13_p1; 780 /* content: static int axxia_i2c_get_sda(struct i2c_adapter *adap)*/ 781 /* LDV_COMMENT_BEGIN_PREP */ 782 #define SCL_WAIT_TIMEOUT_NS 25000000 783 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 784 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 785 #define FIFO_SIZE 8 786 #define GLOBAL_CONTROL 0x00 787 #define GLOBAL_MST_EN BIT(0) 788 #define GLOBAL_SLV_EN BIT(1) 789 #define GLOBAL_IBML_EN BIT(2) 790 #define INTERRUPT_STATUS 0x04 791 #define INTERRUPT_ENABLE 0x08 792 #define INT_SLV BIT(1) 793 #define INT_MST BIT(0) 794 #define WAIT_TIMER_CONTROL 0x0c 795 #define WT_EN BIT(15) 796 #define WT_VALUE(_x) ((_x) & 0x7fff) 797 #define IBML_TIMEOUT 0x10 798 #define IBML_LOW_MEXT 0x14 799 #define IBML_LOW_SEXT 0x18 800 #define TIMER_CLOCK_DIV 0x1c 801 #define I2C_BUS_MONITOR 0x20 802 #define BM_SDAC BIT(3) 803 #define BM_SCLC BIT(2) 804 #define BM_SDAS BIT(1) 805 #define BM_SCLS BIT(0) 806 #define SOFT_RESET 0x24 807 #define MST_COMMAND 0x28 808 #define CMD_BUSY (1<<3) 809 #define CMD_MANUAL (0x00 | CMD_BUSY) 810 #define CMD_AUTO (0x01 | CMD_BUSY) 811 #define MST_RX_XFER 0x2c 812 #define MST_TX_XFER 0x30 813 #define MST_ADDR_1 0x34 814 #define MST_ADDR_2 0x38 815 #define MST_DATA 0x3c 816 #define MST_TX_FIFO 0x40 817 #define MST_RX_FIFO 0x44 818 #define MST_INT_ENABLE 0x48 819 #define MST_INT_STATUS 0x4c 820 #define MST_STATUS_RFL (1 << 13) 821 #define MST_STATUS_TFL (1 << 12) 822 #define MST_STATUS_SNS (1 << 11) 823 #define MST_STATUS_SS (1 << 10) 824 #define MST_STATUS_SCC (1 << 9) 825 #define MST_STATUS_IP (1 << 8) 826 #define MST_STATUS_TSS (1 << 7) 827 #define MST_STATUS_AL (1 << 6) 828 #define MST_STATUS_ND (1 << 5) 829 #define MST_STATUS_NA (1 << 4) 830 #define MST_STATUS_NAK (MST_STATUS_NA | \ 831 MST_STATUS_ND) 832 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 833 MST_STATUS_AL | \ 834 MST_STATUS_IP | \ 835 MST_STATUS_TSS) 836 #define MST_TX_BYTES_XFRD 0x50 837 #define MST_RX_BYTES_XFRD 0x54 838 #define SCL_HIGH_PERIOD 0x80 839 #define SCL_LOW_PERIOD 0x84 840 #define SPIKE_FLTR_LEN 0x88 841 #define SDA_SETUP_TIME 0x8c 842 #define SDA_HOLD_TIME 0x90 843 /* LDV_COMMENT_END_PREP */ 844 845 /** STRUCT: struct type: i2c_algorithm, struct name: axxia_i2c_algo **/ 846 /* content: static u32 axxia_i2c_func(struct i2c_adapter *adap)*/ 
847 /* LDV_COMMENT_BEGIN_PREP */ 848 #define SCL_WAIT_TIMEOUT_NS 25000000 849 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 850 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 851 #define FIFO_SIZE 8 852 #define GLOBAL_CONTROL 0x00 853 #define GLOBAL_MST_EN BIT(0) 854 #define GLOBAL_SLV_EN BIT(1) 855 #define GLOBAL_IBML_EN BIT(2) 856 #define INTERRUPT_STATUS 0x04 857 #define INTERRUPT_ENABLE 0x08 858 #define INT_SLV BIT(1) 859 #define INT_MST BIT(0) 860 #define WAIT_TIMER_CONTROL 0x0c 861 #define WT_EN BIT(15) 862 #define WT_VALUE(_x) ((_x) & 0x7fff) 863 #define IBML_TIMEOUT 0x10 864 #define IBML_LOW_MEXT 0x14 865 #define IBML_LOW_SEXT 0x18 866 #define TIMER_CLOCK_DIV 0x1c 867 #define I2C_BUS_MONITOR 0x20 868 #define BM_SDAC BIT(3) 869 #define BM_SCLC BIT(2) 870 #define BM_SDAS BIT(1) 871 #define BM_SCLS BIT(0) 872 #define SOFT_RESET 0x24 873 #define MST_COMMAND 0x28 874 #define CMD_BUSY (1<<3) 875 #define CMD_MANUAL (0x00 | CMD_BUSY) 876 #define CMD_AUTO (0x01 | CMD_BUSY) 877 #define MST_RX_XFER 0x2c 878 #define MST_TX_XFER 0x30 879 #define MST_ADDR_1 0x34 880 #define MST_ADDR_2 0x38 881 #define MST_DATA 0x3c 882 #define MST_TX_FIFO 0x40 883 #define MST_RX_FIFO 0x44 884 #define MST_INT_ENABLE 0x48 885 #define MST_INT_STATUS 0x4c 886 #define MST_STATUS_RFL (1 << 13) 887 #define MST_STATUS_TFL (1 << 12) 888 #define MST_STATUS_SNS (1 << 11) 889 #define MST_STATUS_SS (1 << 10) 890 #define MST_STATUS_SCC (1 << 9) 891 #define MST_STATUS_IP (1 << 8) 892 #define MST_STATUS_TSS (1 << 7) 893 #define MST_STATUS_AL (1 << 6) 894 #define MST_STATUS_ND (1 << 5) 895 #define MST_STATUS_NA (1 << 4) 896 #define MST_STATUS_NAK (MST_STATUS_NA | \ 897 MST_STATUS_ND) 898 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 899 MST_STATUS_AL | \ 900 MST_STATUS_IP | \ 901 MST_STATUS_TSS) 902 #define MST_TX_BYTES_XFRD 0x50 903 #define MST_RX_BYTES_XFRD 0x54 904 #define SCL_HIGH_PERIOD 0x80 905 #define SCL_LOW_PERIOD 0x84 906 #define SPIKE_FLTR_LEN 0x88 907 #define SDA_SETUP_TIME 0x8c 908 #define SDA_HOLD_TIME 0x90 909 /* LDV_COMMENT_END_PREP */ 910 911 /** STRUCT: struct type: platform_driver, struct name: axxia_i2c_driver **/ 912 /* content: static int axxia_i2c_probe(struct platform_device *pdev)*/ 913 /* LDV_COMMENT_BEGIN_PREP */ 914 #define SCL_WAIT_TIMEOUT_NS 25000000 915 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 916 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 917 #define FIFO_SIZE 8 918 #define GLOBAL_CONTROL 0x00 919 #define GLOBAL_MST_EN BIT(0) 920 #define GLOBAL_SLV_EN BIT(1) 921 #define GLOBAL_IBML_EN BIT(2) 922 #define INTERRUPT_STATUS 0x04 923 #define INTERRUPT_ENABLE 0x08 924 #define INT_SLV BIT(1) 925 #define INT_MST BIT(0) 926 #define WAIT_TIMER_CONTROL 0x0c 927 #define WT_EN BIT(15) 928 #define WT_VALUE(_x) ((_x) & 0x7fff) 929 #define IBML_TIMEOUT 0x10 930 #define IBML_LOW_MEXT 0x14 931 #define IBML_LOW_SEXT 0x18 932 #define TIMER_CLOCK_DIV 0x1c 933 #define I2C_BUS_MONITOR 0x20 934 #define BM_SDAC BIT(3) 935 #define BM_SCLC BIT(2) 936 #define BM_SDAS BIT(1) 937 #define BM_SCLS BIT(0) 938 #define SOFT_RESET 0x24 939 #define MST_COMMAND 0x28 940 #define CMD_BUSY (1<<3) 941 #define CMD_MANUAL (0x00 | CMD_BUSY) 942 #define CMD_AUTO (0x01 | CMD_BUSY) 943 #define MST_RX_XFER 0x2c 944 #define MST_TX_XFER 0x30 945 #define MST_ADDR_1 0x34 946 #define MST_ADDR_2 0x38 947 #define MST_DATA 0x3c 948 #define MST_TX_FIFO 0x40 949 #define MST_RX_FIFO 0x44 950 #define MST_INT_ENABLE 0x48 951 #define MST_INT_STATUS 0x4c 952 #define MST_STATUS_RFL (1 << 13) 953 #define MST_STATUS_TFL (1 << 12) 954 #define 
MST_STATUS_SNS (1 << 11) 955 #define MST_STATUS_SS (1 << 10) 956 #define MST_STATUS_SCC (1 << 9) 957 #define MST_STATUS_IP (1 << 8) 958 #define MST_STATUS_TSS (1 << 7) 959 #define MST_STATUS_AL (1 << 6) 960 #define MST_STATUS_ND (1 << 5) 961 #define MST_STATUS_NA (1 << 4) 962 #define MST_STATUS_NAK (MST_STATUS_NA | \ 963 MST_STATUS_ND) 964 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 965 MST_STATUS_AL | \ 966 MST_STATUS_IP | \ 967 MST_STATUS_TSS) 968 #define MST_TX_BYTES_XFRD 0x50 969 #define MST_RX_BYTES_XFRD 0x54 970 #define SCL_HIGH_PERIOD 0x80 971 #define SCL_LOW_PERIOD 0x84 972 #define SPIKE_FLTR_LEN 0x88 973 #define SDA_SETUP_TIME 0x8c 974 #define SDA_HOLD_TIME 0x90 975 /* LDV_COMMENT_END_PREP */ 976 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "axxia_i2c_probe" */ 977 struct platform_device * var_group2; 978 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "axxia_i2c_probe" */ 979 static int res_axxia_i2c_probe_16; 980 /* content: static int axxia_i2c_remove(struct platform_device *pdev)*/ 981 /* LDV_COMMENT_BEGIN_PREP */ 982 #define SCL_WAIT_TIMEOUT_NS 25000000 983 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 984 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 985 #define FIFO_SIZE 8 986 #define GLOBAL_CONTROL 0x00 987 #define GLOBAL_MST_EN BIT(0) 988 #define GLOBAL_SLV_EN BIT(1) 989 #define GLOBAL_IBML_EN BIT(2) 990 #define INTERRUPT_STATUS 0x04 991 #define INTERRUPT_ENABLE 0x08 992 #define INT_SLV BIT(1) 993 #define INT_MST BIT(0) 994 #define WAIT_TIMER_CONTROL 0x0c 995 #define WT_EN BIT(15) 996 #define WT_VALUE(_x) ((_x) & 0x7fff) 997 #define IBML_TIMEOUT 0x10 998 #define IBML_LOW_MEXT 0x14 999 #define IBML_LOW_SEXT 0x18 1000 #define TIMER_CLOCK_DIV 0x1c 1001 #define I2C_BUS_MONITOR 0x20 1002 #define BM_SDAC BIT(3) 1003 #define BM_SCLC BIT(2) 1004 #define BM_SDAS BIT(1) 1005 #define BM_SCLS BIT(0) 1006 #define SOFT_RESET 0x24 1007 #define MST_COMMAND 0x28 1008 #define CMD_BUSY (1<<3) 1009 #define CMD_MANUAL (0x00 | CMD_BUSY) 1010 #define CMD_AUTO (0x01 | CMD_BUSY) 1011 #define MST_RX_XFER 0x2c 1012 #define MST_TX_XFER 0x30 1013 #define MST_ADDR_1 0x34 1014 #define MST_ADDR_2 0x38 1015 #define MST_DATA 0x3c 1016 #define MST_TX_FIFO 0x40 1017 #define MST_RX_FIFO 0x44 1018 #define MST_INT_ENABLE 0x48 1019 #define MST_INT_STATUS 0x4c 1020 #define MST_STATUS_RFL (1 << 13) 1021 #define MST_STATUS_TFL (1 << 12) 1022 #define MST_STATUS_SNS (1 << 11) 1023 #define MST_STATUS_SS (1 << 10) 1024 #define MST_STATUS_SCC (1 << 9) 1025 #define MST_STATUS_IP (1 << 8) 1026 #define MST_STATUS_TSS (1 << 7) 1027 #define MST_STATUS_AL (1 << 6) 1028 #define MST_STATUS_ND (1 << 5) 1029 #define MST_STATUS_NA (1 << 4) 1030 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1031 MST_STATUS_ND) 1032 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1033 MST_STATUS_AL | \ 1034 MST_STATUS_IP | \ 1035 MST_STATUS_TSS) 1036 #define MST_TX_BYTES_XFRD 0x50 1037 #define MST_RX_BYTES_XFRD 0x54 1038 #define SCL_HIGH_PERIOD 0x80 1039 #define SCL_LOW_PERIOD 0x84 1040 #define SPIKE_FLTR_LEN 0x88 1041 #define SDA_SETUP_TIME 0x8c 1042 #define SDA_HOLD_TIME 0x90 1043 /* LDV_COMMENT_END_PREP */ 1044 1045 /** CALLBACK SECTION request_irq **/ 1046 /* content: static irqreturn_t axxia_i2c_isr(int irq, void *_dev)*/ 1047 /* LDV_COMMENT_BEGIN_PREP */ 1048 #define SCL_WAIT_TIMEOUT_NS 25000000 1049 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1050 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1051 #define FIFO_SIZE 8 1052 #define GLOBAL_CONTROL 0x00 1053 #define GLOBAL_MST_EN 
BIT(0) 1054 #define GLOBAL_SLV_EN BIT(1) 1055 #define GLOBAL_IBML_EN BIT(2) 1056 #define INTERRUPT_STATUS 0x04 1057 #define INTERRUPT_ENABLE 0x08 1058 #define INT_SLV BIT(1) 1059 #define INT_MST BIT(0) 1060 #define WAIT_TIMER_CONTROL 0x0c 1061 #define WT_EN BIT(15) 1062 #define WT_VALUE(_x) ((_x) & 0x7fff) 1063 #define IBML_TIMEOUT 0x10 1064 #define IBML_LOW_MEXT 0x14 1065 #define IBML_LOW_SEXT 0x18 1066 #define TIMER_CLOCK_DIV 0x1c 1067 #define I2C_BUS_MONITOR 0x20 1068 #define BM_SDAC BIT(3) 1069 #define BM_SCLC BIT(2) 1070 #define BM_SDAS BIT(1) 1071 #define BM_SCLS BIT(0) 1072 #define SOFT_RESET 0x24 1073 #define MST_COMMAND 0x28 1074 #define CMD_BUSY (1<<3) 1075 #define CMD_MANUAL (0x00 | CMD_BUSY) 1076 #define CMD_AUTO (0x01 | CMD_BUSY) 1077 #define MST_RX_XFER 0x2c 1078 #define MST_TX_XFER 0x30 1079 #define MST_ADDR_1 0x34 1080 #define MST_ADDR_2 0x38 1081 #define MST_DATA 0x3c 1082 #define MST_TX_FIFO 0x40 1083 #define MST_RX_FIFO 0x44 1084 #define MST_INT_ENABLE 0x48 1085 #define MST_INT_STATUS 0x4c 1086 #define MST_STATUS_RFL (1 << 13) 1087 #define MST_STATUS_TFL (1 << 12) 1088 #define MST_STATUS_SNS (1 << 11) 1089 #define MST_STATUS_SS (1 << 10) 1090 #define MST_STATUS_SCC (1 << 9) 1091 #define MST_STATUS_IP (1 << 8) 1092 #define MST_STATUS_TSS (1 << 7) 1093 #define MST_STATUS_AL (1 << 6) 1094 #define MST_STATUS_ND (1 << 5) 1095 #define MST_STATUS_NA (1 << 4) 1096 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1097 MST_STATUS_ND) 1098 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1099 MST_STATUS_AL | \ 1100 MST_STATUS_IP | \ 1101 MST_STATUS_TSS) 1102 #define MST_TX_BYTES_XFRD 0x50 1103 #define MST_RX_BYTES_XFRD 0x54 1104 #define SCL_HIGH_PERIOD 0x80 1105 #define SCL_LOW_PERIOD 0x84 1106 #define SPIKE_FLTR_LEN 0x88 1107 #define SDA_SETUP_TIME 0x8c 1108 #define SDA_HOLD_TIME 0x90 1109 /* LDV_COMMENT_END_PREP */ 1110 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "axxia_i2c_isr" */ 1111 int var_axxia_i2c_isr_9_p0; 1112 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "axxia_i2c_isr" */ 1113 void * var_axxia_i2c_isr_9_p1; 1114 1115 1116 1117 1118 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */ 1119 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */ 1120 /*============================= VARIABLE INITIALIZING PART =============================*/ 1121 LDV_IN_INTERRUPT=1; 1122 1123 1124 1125 1126 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */ 1127 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */ 1128 /*============================= FUNCTION CALL SECTION =============================*/ 1129 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. 
*/ 1130 ldv_initialize(); 1131 1132 1133 1134 1135 int ldv_s_axxia_i2c_driver_platform_driver = 0; 1136 1137 1138 1139 1140 while( nondet_int() 1141 || !(ldv_s_axxia_i2c_driver_platform_driver == 0) 1142 ) { 1143 1144 switch(nondet_int()) { 1145 1146 case 0: { 1147 1148 /** STRUCT: struct type: i2c_bus_recovery_info, struct name: axxia_i2c_recovery_info **/ 1149 1150 1151 /* content: static int axxia_i2c_get_scl(struct i2c_adapter *adap)*/ 1152 /* LDV_COMMENT_BEGIN_PREP */ 1153 #define SCL_WAIT_TIMEOUT_NS 25000000 1154 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1155 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1156 #define FIFO_SIZE 8 1157 #define GLOBAL_CONTROL 0x00 1158 #define GLOBAL_MST_EN BIT(0) 1159 #define GLOBAL_SLV_EN BIT(1) 1160 #define GLOBAL_IBML_EN BIT(2) 1161 #define INTERRUPT_STATUS 0x04 1162 #define INTERRUPT_ENABLE 0x08 1163 #define INT_SLV BIT(1) 1164 #define INT_MST BIT(0) 1165 #define WAIT_TIMER_CONTROL 0x0c 1166 #define WT_EN BIT(15) 1167 #define WT_VALUE(_x) ((_x) & 0x7fff) 1168 #define IBML_TIMEOUT 0x10 1169 #define IBML_LOW_MEXT 0x14 1170 #define IBML_LOW_SEXT 0x18 1171 #define TIMER_CLOCK_DIV 0x1c 1172 #define I2C_BUS_MONITOR 0x20 1173 #define BM_SDAC BIT(3) 1174 #define BM_SCLC BIT(2) 1175 #define BM_SDAS BIT(1) 1176 #define BM_SCLS BIT(0) 1177 #define SOFT_RESET 0x24 1178 #define MST_COMMAND 0x28 1179 #define CMD_BUSY (1<<3) 1180 #define CMD_MANUAL (0x00 | CMD_BUSY) 1181 #define CMD_AUTO (0x01 | CMD_BUSY) 1182 #define MST_RX_XFER 0x2c 1183 #define MST_TX_XFER 0x30 1184 #define MST_ADDR_1 0x34 1185 #define MST_ADDR_2 0x38 1186 #define MST_DATA 0x3c 1187 #define MST_TX_FIFO 0x40 1188 #define MST_RX_FIFO 0x44 1189 #define MST_INT_ENABLE 0x48 1190 #define MST_INT_STATUS 0x4c 1191 #define MST_STATUS_RFL (1 << 13) 1192 #define MST_STATUS_TFL (1 << 12) 1193 #define MST_STATUS_SNS (1 << 11) 1194 #define MST_STATUS_SS (1 << 10) 1195 #define MST_STATUS_SCC (1 << 9) 1196 #define MST_STATUS_IP (1 << 8) 1197 #define MST_STATUS_TSS (1 << 7) 1198 #define MST_STATUS_AL (1 << 6) 1199 #define MST_STATUS_ND (1 << 5) 1200 #define MST_STATUS_NA (1 << 4) 1201 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1202 MST_STATUS_ND) 1203 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1204 MST_STATUS_AL | \ 1205 MST_STATUS_IP | \ 1206 MST_STATUS_TSS) 1207 #define MST_TX_BYTES_XFRD 0x50 1208 #define MST_RX_BYTES_XFRD 0x54 1209 #define SCL_HIGH_PERIOD 0x80 1210 #define SCL_LOW_PERIOD 0x84 1211 #define SPIKE_FLTR_LEN 0x88 1212 #define SDA_SETUP_TIME 0x8c 1213 #define SDA_HOLD_TIME 0x90 1214 /* LDV_COMMENT_END_PREP */ 1215 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_scl" from driver structure with callbacks "axxia_i2c_recovery_info" */ 1216 ldv_handler_precall(); 1217 axxia_i2c_get_scl( var_group1); 1218 1219 1220 1221 1222 } 1223 1224 break; 1225 case 1: { 1226 1227 /** STRUCT: struct type: i2c_bus_recovery_info, struct name: axxia_i2c_recovery_info **/ 1228 1229 1230 /* content: static void axxia_i2c_set_scl(struct i2c_adapter *adap, int val)*/ 1231 /* LDV_COMMENT_BEGIN_PREP */ 1232 #define SCL_WAIT_TIMEOUT_NS 25000000 1233 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1234 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1235 #define FIFO_SIZE 8 1236 #define GLOBAL_CONTROL 0x00 1237 #define GLOBAL_MST_EN BIT(0) 1238 #define GLOBAL_SLV_EN BIT(1) 1239 #define GLOBAL_IBML_EN BIT(2) 1240 #define INTERRUPT_STATUS 0x04 1241 #define INTERRUPT_ENABLE 0x08 1242 #define INT_SLV BIT(1) 1243 #define INT_MST BIT(0) 1244 #define WAIT_TIMER_CONTROL 0x0c 1245 #define WT_EN BIT(15) 1246 #define 
WT_VALUE(_x) ((_x) & 0x7fff) 1247 #define IBML_TIMEOUT 0x10 1248 #define IBML_LOW_MEXT 0x14 1249 #define IBML_LOW_SEXT 0x18 1250 #define TIMER_CLOCK_DIV 0x1c 1251 #define I2C_BUS_MONITOR 0x20 1252 #define BM_SDAC BIT(3) 1253 #define BM_SCLC BIT(2) 1254 #define BM_SDAS BIT(1) 1255 #define BM_SCLS BIT(0) 1256 #define SOFT_RESET 0x24 1257 #define MST_COMMAND 0x28 1258 #define CMD_BUSY (1<<3) 1259 #define CMD_MANUAL (0x00 | CMD_BUSY) 1260 #define CMD_AUTO (0x01 | CMD_BUSY) 1261 #define MST_RX_XFER 0x2c 1262 #define MST_TX_XFER 0x30 1263 #define MST_ADDR_1 0x34 1264 #define MST_ADDR_2 0x38 1265 #define MST_DATA 0x3c 1266 #define MST_TX_FIFO 0x40 1267 #define MST_RX_FIFO 0x44 1268 #define MST_INT_ENABLE 0x48 1269 #define MST_INT_STATUS 0x4c 1270 #define MST_STATUS_RFL (1 << 13) 1271 #define MST_STATUS_TFL (1 << 12) 1272 #define MST_STATUS_SNS (1 << 11) 1273 #define MST_STATUS_SS (1 << 10) 1274 #define MST_STATUS_SCC (1 << 9) 1275 #define MST_STATUS_IP (1 << 8) 1276 #define MST_STATUS_TSS (1 << 7) 1277 #define MST_STATUS_AL (1 << 6) 1278 #define MST_STATUS_ND (1 << 5) 1279 #define MST_STATUS_NA (1 << 4) 1280 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1281 MST_STATUS_ND) 1282 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1283 MST_STATUS_AL | \ 1284 MST_STATUS_IP | \ 1285 MST_STATUS_TSS) 1286 #define MST_TX_BYTES_XFRD 0x50 1287 #define MST_RX_BYTES_XFRD 0x54 1288 #define SCL_HIGH_PERIOD 0x80 1289 #define SCL_LOW_PERIOD 0x84 1290 #define SPIKE_FLTR_LEN 0x88 1291 #define SDA_SETUP_TIME 0x8c 1292 #define SDA_HOLD_TIME 0x90 1293 /* LDV_COMMENT_END_PREP */ 1294 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_scl" from driver structure with callbacks "axxia_i2c_recovery_info" */ 1295 ldv_handler_precall(); 1296 axxia_i2c_set_scl( var_group1, var_axxia_i2c_set_scl_13_p1); 1297 1298 1299 1300 1301 } 1302 1303 break; 1304 case 2: { 1305 1306 /** STRUCT: struct type: i2c_bus_recovery_info, struct name: axxia_i2c_recovery_info **/ 1307 1308 1309 /* content: static int axxia_i2c_get_sda(struct i2c_adapter *adap)*/ 1310 /* LDV_COMMENT_BEGIN_PREP */ 1311 #define SCL_WAIT_TIMEOUT_NS 25000000 1312 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1313 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1314 #define FIFO_SIZE 8 1315 #define GLOBAL_CONTROL 0x00 1316 #define GLOBAL_MST_EN BIT(0) 1317 #define GLOBAL_SLV_EN BIT(1) 1318 #define GLOBAL_IBML_EN BIT(2) 1319 #define INTERRUPT_STATUS 0x04 1320 #define INTERRUPT_ENABLE 0x08 1321 #define INT_SLV BIT(1) 1322 #define INT_MST BIT(0) 1323 #define WAIT_TIMER_CONTROL 0x0c 1324 #define WT_EN BIT(15) 1325 #define WT_VALUE(_x) ((_x) & 0x7fff) 1326 #define IBML_TIMEOUT 0x10 1327 #define IBML_LOW_MEXT 0x14 1328 #define IBML_LOW_SEXT 0x18 1329 #define TIMER_CLOCK_DIV 0x1c 1330 #define I2C_BUS_MONITOR 0x20 1331 #define BM_SDAC BIT(3) 1332 #define BM_SCLC BIT(2) 1333 #define BM_SDAS BIT(1) 1334 #define BM_SCLS BIT(0) 1335 #define SOFT_RESET 0x24 1336 #define MST_COMMAND 0x28 1337 #define CMD_BUSY (1<<3) 1338 #define CMD_MANUAL (0x00 | CMD_BUSY) 1339 #define CMD_AUTO (0x01 | CMD_BUSY) 1340 #define MST_RX_XFER 0x2c 1341 #define MST_TX_XFER 0x30 1342 #define MST_ADDR_1 0x34 1343 #define MST_ADDR_2 0x38 1344 #define MST_DATA 0x3c 1345 #define MST_TX_FIFO 0x40 1346 #define MST_RX_FIFO 0x44 1347 #define MST_INT_ENABLE 0x48 1348 #define MST_INT_STATUS 0x4c 1349 #define MST_STATUS_RFL (1 << 13) 1350 #define MST_STATUS_TFL (1 << 12) 1351 #define MST_STATUS_SNS (1 << 11) 1352 #define MST_STATUS_SS (1 << 10) 1353 #define MST_STATUS_SCC (1 << 9) 1354 #define MST_STATUS_IP (1 << 8) 1355 
#define MST_STATUS_TSS (1 << 7) 1356 #define MST_STATUS_AL (1 << 6) 1357 #define MST_STATUS_ND (1 << 5) 1358 #define MST_STATUS_NA (1 << 4) 1359 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1360 MST_STATUS_ND) 1361 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1362 MST_STATUS_AL | \ 1363 MST_STATUS_IP | \ 1364 MST_STATUS_TSS) 1365 #define MST_TX_BYTES_XFRD 0x50 1366 #define MST_RX_BYTES_XFRD 0x54 1367 #define SCL_HIGH_PERIOD 0x80 1368 #define SCL_LOW_PERIOD 0x84 1369 #define SPIKE_FLTR_LEN 0x88 1370 #define SDA_SETUP_TIME 0x8c 1371 #define SDA_HOLD_TIME 0x90 1372 /* LDV_COMMENT_END_PREP */ 1373 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_sda" from driver structure with callbacks "axxia_i2c_recovery_info" */ 1374 ldv_handler_precall(); 1375 axxia_i2c_get_sda( var_group1); 1376 1377 1378 1379 1380 } 1381 1382 break; 1383 case 3: { 1384 1385 /** STRUCT: struct type: i2c_algorithm, struct name: axxia_i2c_algo **/ 1386 1387 1388 /* content: static u32 axxia_i2c_func(struct i2c_adapter *adap)*/ 1389 /* LDV_COMMENT_BEGIN_PREP */ 1390 #define SCL_WAIT_TIMEOUT_NS 25000000 1391 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1392 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1393 #define FIFO_SIZE 8 1394 #define GLOBAL_CONTROL 0x00 1395 #define GLOBAL_MST_EN BIT(0) 1396 #define GLOBAL_SLV_EN BIT(1) 1397 #define GLOBAL_IBML_EN BIT(2) 1398 #define INTERRUPT_STATUS 0x04 1399 #define INTERRUPT_ENABLE 0x08 1400 #define INT_SLV BIT(1) 1401 #define INT_MST BIT(0) 1402 #define WAIT_TIMER_CONTROL 0x0c 1403 #define WT_EN BIT(15) 1404 #define WT_VALUE(_x) ((_x) & 0x7fff) 1405 #define IBML_TIMEOUT 0x10 1406 #define IBML_LOW_MEXT 0x14 1407 #define IBML_LOW_SEXT 0x18 1408 #define TIMER_CLOCK_DIV 0x1c 1409 #define I2C_BUS_MONITOR 0x20 1410 #define BM_SDAC BIT(3) 1411 #define BM_SCLC BIT(2) 1412 #define BM_SDAS BIT(1) 1413 #define BM_SCLS BIT(0) 1414 #define SOFT_RESET 0x24 1415 #define MST_COMMAND 0x28 1416 #define CMD_BUSY (1<<3) 1417 #define CMD_MANUAL (0x00 | CMD_BUSY) 1418 #define CMD_AUTO (0x01 | CMD_BUSY) 1419 #define MST_RX_XFER 0x2c 1420 #define MST_TX_XFER 0x30 1421 #define MST_ADDR_1 0x34 1422 #define MST_ADDR_2 0x38 1423 #define MST_DATA 0x3c 1424 #define MST_TX_FIFO 0x40 1425 #define MST_RX_FIFO 0x44 1426 #define MST_INT_ENABLE 0x48 1427 #define MST_INT_STATUS 0x4c 1428 #define MST_STATUS_RFL (1 << 13) 1429 #define MST_STATUS_TFL (1 << 12) 1430 #define MST_STATUS_SNS (1 << 11) 1431 #define MST_STATUS_SS (1 << 10) 1432 #define MST_STATUS_SCC (1 << 9) 1433 #define MST_STATUS_IP (1 << 8) 1434 #define MST_STATUS_TSS (1 << 7) 1435 #define MST_STATUS_AL (1 << 6) 1436 #define MST_STATUS_ND (1 << 5) 1437 #define MST_STATUS_NA (1 << 4) 1438 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1439 MST_STATUS_ND) 1440 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1441 MST_STATUS_AL | \ 1442 MST_STATUS_IP | \ 1443 MST_STATUS_TSS) 1444 #define MST_TX_BYTES_XFRD 0x50 1445 #define MST_RX_BYTES_XFRD 0x54 1446 #define SCL_HIGH_PERIOD 0x80 1447 #define SCL_LOW_PERIOD 0x84 1448 #define SPIKE_FLTR_LEN 0x88 1449 #define SDA_SETUP_TIME 0x8c 1450 #define SDA_HOLD_TIME 0x90 1451 /* LDV_COMMENT_END_PREP */ 1452 /* LDV_COMMENT_FUNCTION_CALL Function from field "functionality" from driver structure with callbacks "axxia_i2c_algo" */ 1453 ldv_handler_precall(); 1454 axxia_i2c_func( var_group1); 1455 1456 1457 1458 1459 } 1460 1461 break; 1462 case 4: { 1463 1464 /** STRUCT: struct type: platform_driver, struct name: axxia_i2c_driver **/ 1465 if(ldv_s_axxia_i2c_driver_platform_driver==0) { 1466 1467 /* content: static int 
axxia_i2c_probe(struct platform_device *pdev)*/ 1468 /* LDV_COMMENT_BEGIN_PREP */ 1469 #define SCL_WAIT_TIMEOUT_NS 25000000 1470 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1471 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1472 #define FIFO_SIZE 8 1473 #define GLOBAL_CONTROL 0x00 1474 #define GLOBAL_MST_EN BIT(0) 1475 #define GLOBAL_SLV_EN BIT(1) 1476 #define GLOBAL_IBML_EN BIT(2) 1477 #define INTERRUPT_STATUS 0x04 1478 #define INTERRUPT_ENABLE 0x08 1479 #define INT_SLV BIT(1) 1480 #define INT_MST BIT(0) 1481 #define WAIT_TIMER_CONTROL 0x0c 1482 #define WT_EN BIT(15) 1483 #define WT_VALUE(_x) ((_x) & 0x7fff) 1484 #define IBML_TIMEOUT 0x10 1485 #define IBML_LOW_MEXT 0x14 1486 #define IBML_LOW_SEXT 0x18 1487 #define TIMER_CLOCK_DIV 0x1c 1488 #define I2C_BUS_MONITOR 0x20 1489 #define BM_SDAC BIT(3) 1490 #define BM_SCLC BIT(2) 1491 #define BM_SDAS BIT(1) 1492 #define BM_SCLS BIT(0) 1493 #define SOFT_RESET 0x24 1494 #define MST_COMMAND 0x28 1495 #define CMD_BUSY (1<<3) 1496 #define CMD_MANUAL (0x00 | CMD_BUSY) 1497 #define CMD_AUTO (0x01 | CMD_BUSY) 1498 #define MST_RX_XFER 0x2c 1499 #define MST_TX_XFER 0x30 1500 #define MST_ADDR_1 0x34 1501 #define MST_ADDR_2 0x38 1502 #define MST_DATA 0x3c 1503 #define MST_TX_FIFO 0x40 1504 #define MST_RX_FIFO 0x44 1505 #define MST_INT_ENABLE 0x48 1506 #define MST_INT_STATUS 0x4c 1507 #define MST_STATUS_RFL (1 << 13) 1508 #define MST_STATUS_TFL (1 << 12) 1509 #define MST_STATUS_SNS (1 << 11) 1510 #define MST_STATUS_SS (1 << 10) 1511 #define MST_STATUS_SCC (1 << 9) 1512 #define MST_STATUS_IP (1 << 8) 1513 #define MST_STATUS_TSS (1 << 7) 1514 #define MST_STATUS_AL (1 << 6) 1515 #define MST_STATUS_ND (1 << 5) 1516 #define MST_STATUS_NA (1 << 4) 1517 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1518 MST_STATUS_ND) 1519 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1520 MST_STATUS_AL | \ 1521 MST_STATUS_IP | \ 1522 MST_STATUS_TSS) 1523 #define MST_TX_BYTES_XFRD 0x50 1524 #define MST_RX_BYTES_XFRD 0x54 1525 #define SCL_HIGH_PERIOD 0x80 1526 #define SCL_LOW_PERIOD 0x84 1527 #define SPIKE_FLTR_LEN 0x88 1528 #define SDA_SETUP_TIME 0x8c 1529 #define SDA_HOLD_TIME 0x90 1530 /* LDV_COMMENT_END_PREP */ 1531 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "axxia_i2c_driver". Standart function test for correct return result. 
*/ 1532 res_axxia_i2c_probe_16 = axxia_i2c_probe( var_group2); 1533 ldv_check_return_value(res_axxia_i2c_probe_16); 1534 ldv_check_return_value_probe(res_axxia_i2c_probe_16); 1535 if(res_axxia_i2c_probe_16) 1536 goto ldv_module_exit; 1537 ldv_s_axxia_i2c_driver_platform_driver++; 1538 1539 } 1540 1541 } 1542 1543 break; 1544 case 5: { 1545 1546 /** STRUCT: struct type: platform_driver, struct name: axxia_i2c_driver **/ 1547 if(ldv_s_axxia_i2c_driver_platform_driver==1) { 1548 1549 /* content: static int axxia_i2c_remove(struct platform_device *pdev)*/ 1550 /* LDV_COMMENT_BEGIN_PREP */ 1551 #define SCL_WAIT_TIMEOUT_NS 25000000 1552 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1553 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1554 #define FIFO_SIZE 8 1555 #define GLOBAL_CONTROL 0x00 1556 #define GLOBAL_MST_EN BIT(0) 1557 #define GLOBAL_SLV_EN BIT(1) 1558 #define GLOBAL_IBML_EN BIT(2) 1559 #define INTERRUPT_STATUS 0x04 1560 #define INTERRUPT_ENABLE 0x08 1561 #define INT_SLV BIT(1) 1562 #define INT_MST BIT(0) 1563 #define WAIT_TIMER_CONTROL 0x0c 1564 #define WT_EN BIT(15) 1565 #define WT_VALUE(_x) ((_x) & 0x7fff) 1566 #define IBML_TIMEOUT 0x10 1567 #define IBML_LOW_MEXT 0x14 1568 #define IBML_LOW_SEXT 0x18 1569 #define TIMER_CLOCK_DIV 0x1c 1570 #define I2C_BUS_MONITOR 0x20 1571 #define BM_SDAC BIT(3) 1572 #define BM_SCLC BIT(2) 1573 #define BM_SDAS BIT(1) 1574 #define BM_SCLS BIT(0) 1575 #define SOFT_RESET 0x24 1576 #define MST_COMMAND 0x28 1577 #define CMD_BUSY (1<<3) 1578 #define CMD_MANUAL (0x00 | CMD_BUSY) 1579 #define CMD_AUTO (0x01 | CMD_BUSY) 1580 #define MST_RX_XFER 0x2c 1581 #define MST_TX_XFER 0x30 1582 #define MST_ADDR_1 0x34 1583 #define MST_ADDR_2 0x38 1584 #define MST_DATA 0x3c 1585 #define MST_TX_FIFO 0x40 1586 #define MST_RX_FIFO 0x44 1587 #define MST_INT_ENABLE 0x48 1588 #define MST_INT_STATUS 0x4c 1589 #define MST_STATUS_RFL (1 << 13) 1590 #define MST_STATUS_TFL (1 << 12) 1591 #define MST_STATUS_SNS (1 << 11) 1592 #define MST_STATUS_SS (1 << 10) 1593 #define MST_STATUS_SCC (1 << 9) 1594 #define MST_STATUS_IP (1 << 8) 1595 #define MST_STATUS_TSS (1 << 7) 1596 #define MST_STATUS_AL (1 << 6) 1597 #define MST_STATUS_ND (1 << 5) 1598 #define MST_STATUS_NA (1 << 4) 1599 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1600 MST_STATUS_ND) 1601 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1602 MST_STATUS_AL | \ 1603 MST_STATUS_IP | \ 1604 MST_STATUS_TSS) 1605 #define MST_TX_BYTES_XFRD 0x50 1606 #define MST_RX_BYTES_XFRD 0x54 1607 #define SCL_HIGH_PERIOD 0x80 1608 #define SCL_LOW_PERIOD 0x84 1609 #define SPIKE_FLTR_LEN 0x88 1610 #define SDA_SETUP_TIME 0x8c 1611 #define SDA_HOLD_TIME 0x90 1612 /* LDV_COMMENT_END_PREP */ 1613 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "axxia_i2c_driver" */ 1614 ldv_handler_precall(); 1615 axxia_i2c_remove( var_group2); 1616 ldv_s_axxia_i2c_driver_platform_driver=0; 1617 1618 } 1619 1620 } 1621 1622 break; 1623 case 6: { 1624 1625 /** CALLBACK SECTION request_irq **/ 1626 LDV_IN_INTERRUPT=2; 1627 1628 /* content: static irqreturn_t axxia_i2c_isr(int irq, void *_dev)*/ 1629 /* LDV_COMMENT_BEGIN_PREP */ 1630 #define SCL_WAIT_TIMEOUT_NS 25000000 1631 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1632 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1633 #define FIFO_SIZE 8 1634 #define GLOBAL_CONTROL 0x00 1635 #define GLOBAL_MST_EN BIT(0) 1636 #define GLOBAL_SLV_EN BIT(1) 1637 #define GLOBAL_IBML_EN BIT(2) 1638 #define INTERRUPT_STATUS 0x04 1639 #define INTERRUPT_ENABLE 0x08 1640 #define INT_SLV BIT(1) 
1641 #define INT_MST BIT(0) 1642 #define WAIT_TIMER_CONTROL 0x0c 1643 #define WT_EN BIT(15) 1644 #define WT_VALUE(_x) ((_x) & 0x7fff) 1645 #define IBML_TIMEOUT 0x10 1646 #define IBML_LOW_MEXT 0x14 1647 #define IBML_LOW_SEXT 0x18 1648 #define TIMER_CLOCK_DIV 0x1c 1649 #define I2C_BUS_MONITOR 0x20 1650 #define BM_SDAC BIT(3) 1651 #define BM_SCLC BIT(2) 1652 #define BM_SDAS BIT(1) 1653 #define BM_SCLS BIT(0) 1654 #define SOFT_RESET 0x24 1655 #define MST_COMMAND 0x28 1656 #define CMD_BUSY (1<<3) 1657 #define CMD_MANUAL (0x00 | CMD_BUSY) 1658 #define CMD_AUTO (0x01 | CMD_BUSY) 1659 #define MST_RX_XFER 0x2c 1660 #define MST_TX_XFER 0x30 1661 #define MST_ADDR_1 0x34 1662 #define MST_ADDR_2 0x38 1663 #define MST_DATA 0x3c 1664 #define MST_TX_FIFO 0x40 1665 #define MST_RX_FIFO 0x44 1666 #define MST_INT_ENABLE 0x48 1667 #define MST_INT_STATUS 0x4c 1668 #define MST_STATUS_RFL (1 << 13) 1669 #define MST_STATUS_TFL (1 << 12) 1670 #define MST_STATUS_SNS (1 << 11) 1671 #define MST_STATUS_SS (1 << 10) 1672 #define MST_STATUS_SCC (1 << 9) 1673 #define MST_STATUS_IP (1 << 8) 1674 #define MST_STATUS_TSS (1 << 7) 1675 #define MST_STATUS_AL (1 << 6) 1676 #define MST_STATUS_ND (1 << 5) 1677 #define MST_STATUS_NA (1 << 4) 1678 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1679 MST_STATUS_ND) 1680 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1681 MST_STATUS_AL | \ 1682 MST_STATUS_IP | \ 1683 MST_STATUS_TSS) 1684 #define MST_TX_BYTES_XFRD 0x50 1685 #define MST_RX_BYTES_XFRD 0x54 1686 #define SCL_HIGH_PERIOD 0x80 1687 #define SCL_LOW_PERIOD 0x84 1688 #define SPIKE_FLTR_LEN 0x88 1689 #define SDA_SETUP_TIME 0x8c 1690 #define SDA_HOLD_TIME 0x90 1691 /* LDV_COMMENT_END_PREP */ 1692 /* LDV_COMMENT_FUNCTION_CALL */ 1693 ldv_handler_precall(); 1694 axxia_i2c_isr( var_axxia_i2c_isr_9_p0, var_axxia_i2c_isr_9_p1); 1695 LDV_IN_INTERRUPT=1; 1696 1697 1698 1699 } 1700 1701 break; 1702 default: break; 1703 1704 } 1705 1706 } 1707 1708 ldv_module_exit: 1709 1710 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */ 1711 ldv_final: ldv_check_final_state(); 1712 1713 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */ 1714 return; 1715 1716 } 1717 #endif 1718 1719 /* LDV_COMMENT_END_MAIN */
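Note how case 6 above brackets the interrupt handler: LDV_IN_INTERRUPT is raised to 2 before axxia_i2c_isr() is invoked and restored to 1 afterwards, which is how the model distinguishes interrupt context from process context. The following is only a hedged, hypothetical sketch of that bracket (the ldv_example_* name is not part of the generated code):

/* Hypothetical sketch of the interrupt-context bracket used in case 6 above. */
static void ldv_example_run_isr(int irq, void *dev)
{
	LDV_IN_INTERRUPT = 2;		/* model state: executing in IRQ context */
	ldv_handler_precall();
	axxia_i2c_isr(irq, dev);	/* the driver's handler under test */
	LDV_IN_INTERRUPT = 1;		/* model state: back to process context */
}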
1 2 #include <linux/kernel.h> 3 bool ldv_is_err(const void *ptr); 4 bool ldv_is_err_or_null(const void *ptr); 5 void* ldv_err_ptr(long error); 6 long ldv_ptr_err(const void *ptr); 7 8 #include <linux/module.h> 9 struct clk; 10 11 extern void ldv_clk_disable_clk(struct clk *clk); 12 extern int ldv_clk_enable_clk(void); 13 extern void ldv_clk_disable_i2c_clk_of_axxia_i2c_dev(struct clk *clk); 14 extern int ldv_clk_enable_i2c_clk_of_axxia_i2c_dev(void); 15 #line 1 "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--320_7a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/11495/dscv_tempdir/dscv/ri/320_7a/drivers/i2c/busses/i2c-axxia.c" 16 17 /* 18 * This driver implements I2C master functionality using the LSI API2C 19 * controller. 20 * 21 * NOTE: The controller has a limitation in that it can only do transfers of 22 * maximum 255 bytes at a time. If a larger transfer is attempted, error code 23 * (-EINVAL) is returned. 24 * 25 * This software is licensed under the terms of the GNU General Public 26 * License version 2, as published by the Free Software Foundation, and 27 * may be copied, distributed, and modified under those terms. 28 */ 29 #include <linux/clk.h> 30 #include <linux/clkdev.h> 31 #include <linux/err.h> 32 #include <linux/i2c.h> 33 #include <linux/init.h> 34 #include <linux/interrupt.h> 35 #include <linux/module.h> 36 #include <linux/io.h> 37 #include <linux/kernel.h> 38 #include <linux/platform_device.h> 39 40 #define SCL_WAIT_TIMEOUT_NS 25000000 41 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 42 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 43 #define FIFO_SIZE 8 44 45 #define GLOBAL_CONTROL 0x00 46 #define GLOBAL_MST_EN BIT(0) 47 #define GLOBAL_SLV_EN BIT(1) 48 #define GLOBAL_IBML_EN BIT(2) 49 #define INTERRUPT_STATUS 0x04 50 #define INTERRUPT_ENABLE 0x08 51 #define INT_SLV BIT(1) 52 #define INT_MST BIT(0) 53 #define WAIT_TIMER_CONTROL 0x0c 54 #define WT_EN BIT(15) 55 #define WT_VALUE(_x) ((_x) & 0x7fff) 56 #define IBML_TIMEOUT 0x10 57 #define IBML_LOW_MEXT 0x14 58 #define IBML_LOW_SEXT 0x18 59 #define TIMER_CLOCK_DIV 0x1c 60 #define I2C_BUS_MONITOR 0x20 61 #define BM_SDAC BIT(3) 62 #define BM_SCLC BIT(2) 63 #define BM_SDAS BIT(1) 64 #define BM_SCLS BIT(0) 65 #define SOFT_RESET 0x24 66 #define MST_COMMAND 0x28 67 #define CMD_BUSY (1<<3) 68 #define CMD_MANUAL (0x00 | CMD_BUSY) 69 #define CMD_AUTO (0x01 | CMD_BUSY) 70 #define MST_RX_XFER 0x2c 71 #define MST_TX_XFER 0x30 72 #define MST_ADDR_1 0x34 73 #define MST_ADDR_2 0x38 74 #define MST_DATA 0x3c 75 #define MST_TX_FIFO 0x40 76 #define MST_RX_FIFO 0x44 77 #define MST_INT_ENABLE 0x48 78 #define MST_INT_STATUS 0x4c 79 #define MST_STATUS_RFL (1 << 13) /* RX FIFO serivce */ 80 #define MST_STATUS_TFL (1 << 12) /* TX FIFO service */ 81 #define MST_STATUS_SNS (1 << 11) /* Manual mode done */ 82 #define MST_STATUS_SS (1 << 10) /* Automatic mode done */ 83 #define MST_STATUS_SCC (1 << 9) /* Stop complete */ 84 #define MST_STATUS_IP (1 << 8) /* Invalid parameter */ 85 #define MST_STATUS_TSS (1 << 7) /* Timeout */ 86 #define MST_STATUS_AL (1 << 6) /* Arbitration lost */ 87 #define MST_STATUS_ND (1 << 5) /* NAK on data phase */ 88 #define MST_STATUS_NA (1 << 4) /* NAK on address phase */ 89 #define MST_STATUS_NAK (MST_STATUS_NA | \ 90 MST_STATUS_ND) 91 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 92 MST_STATUS_AL | \ 93 MST_STATUS_IP | \ 94 MST_STATUS_TSS) 95 #define MST_TX_BYTES_XFRD 0x50 96 #define MST_RX_BYTES_XFRD 0x54 97 #define SCL_HIGH_PERIOD 0x80 98 #define SCL_LOW_PERIOD 0x84 99 
#define SPIKE_FLTR_LEN 0x88 100 #define SDA_SETUP_TIME 0x8c 101 #define SDA_HOLD_TIME 0x90 102 103 /** 104 * axxia_i2c_dev - I2C device context 105 * @base: pointer to register struct 106 * @msg: pointer to current message 107 * @msg_xfrd: number of bytes transferred in msg 108 * @msg_err: error code for completed message 109 * @msg_complete: xfer completion object 110 * @dev: device reference 111 * @adapter: core i2c abstraction 112 * @i2c_clk: clock reference for i2c input clock 113 * @bus_clk_rate: current i2c bus clock rate 114 */ 115 struct axxia_i2c_dev { 116 void __iomem *base; 117 struct i2c_msg *msg; 118 size_t msg_xfrd; 119 int msg_err; 120 struct completion msg_complete; 121 struct device *dev; 122 struct i2c_adapter adapter; 123 struct clk *i2c_clk; 124 u32 bus_clk_rate; 125 }; 126 127 static void i2c_int_disable(struct axxia_i2c_dev *idev, u32 mask) 128 { 129 u32 int_en; 130 131 int_en = readl(idev->base + MST_INT_ENABLE); 132 writel(int_en & ~mask, idev->base + MST_INT_ENABLE); 133 } 134 135 static void i2c_int_enable(struct axxia_i2c_dev *idev, u32 mask) 136 { 137 u32 int_en; 138 139 int_en = readl(idev->base + MST_INT_ENABLE); 140 writel(int_en | mask, idev->base + MST_INT_ENABLE); 141 } 142 143 /** 144 * ns_to_clk - Convert time (ns) to clock cycles for the given clock frequency. 145 */ 146 static u32 ns_to_clk(u64 ns, u32 clk_mhz) 147 { 148 return div_u64(ns * clk_mhz, 1000); 149 } 150 151 static int axxia_i2c_init(struct axxia_i2c_dev *idev) 152 { 153 u32 divisor = clk_get_rate(idev->i2c_clk) / idev->bus_clk_rate; 154 u32 clk_mhz = clk_get_rate(idev->i2c_clk) / 1000000; 155 u32 t_setup; 156 u32 t_high, t_low; 157 u32 tmo_clk; 158 u32 prescale; 159 unsigned long timeout; 160 161 dev_dbg(idev->dev, "rate=%uHz per_clk=%uMHz -> ratio=1:%u\n", 162 idev->bus_clk_rate, clk_mhz, divisor); 163 164 /* Reset controller */ 165 writel(0x01, idev->base + SOFT_RESET); 166 timeout = jiffies + msecs_to_jiffies(100); 167 while (readl(idev->base + SOFT_RESET) & 1) { 168 if (time_after(jiffies, timeout)) { 169 dev_warn(idev->dev, "Soft reset failed\n"); 170 break; 171 } 172 } 173 174 /* Enable Master Mode */ 175 writel(0x1, idev->base + GLOBAL_CONTROL); 176 177 if (idev->bus_clk_rate <= 100000) { 178 /* Standard mode SCL 50/50, tSU:DAT = 250 ns */ 179 t_high = divisor * 1 / 2; 180 t_low = divisor * 1 / 2; 181 t_setup = ns_to_clk(250, clk_mhz); 182 } else { 183 /* Fast mode SCL 33/66, tSU:DAT = 100 ns */ 184 t_high = divisor * 1 / 3; 185 t_low = divisor * 2 / 3; 186 t_setup = ns_to_clk(100, clk_mhz); 187 } 188 189 /* SCL High Time */ 190 writel(t_high, idev->base + SCL_HIGH_PERIOD); 191 /* SCL Low Time */ 192 writel(t_low, idev->base + SCL_LOW_PERIOD); 193 /* SDA Setup Time */ 194 writel(t_setup, idev->base + SDA_SETUP_TIME); 195 /* SDA Hold Time, 300ns */ 196 writel(ns_to_clk(300, clk_mhz), idev->base + SDA_HOLD_TIME); 197 /* Filter <50ns spikes */ 198 writel(ns_to_clk(50, clk_mhz), idev->base + SPIKE_FLTR_LEN); 199 200 /* Configure Time-Out Registers */ 201 tmo_clk = ns_to_clk(SCL_WAIT_TIMEOUT_NS, clk_mhz); 202 203 /* Find prescaler value that makes tmo_clk fit in 15-bits counter. 
*/ 204 for (prescale = 0; prescale < 15; ++prescale) { 205 if (tmo_clk <= 0x7fff) 206 break; 207 tmo_clk >>= 1; 208 } 209 if (tmo_clk > 0x7fff) 210 tmo_clk = 0x7fff; 211 212 /* Prescale divider (log2) */ 213 writel(prescale, idev->base + TIMER_CLOCK_DIV); 214 /* Timeout in divided clocks */ 215 writel(WT_EN | WT_VALUE(tmo_clk), idev->base + WAIT_TIMER_CONTROL); 216 217 /* Mask all master interrupt bits */ 218 i2c_int_disable(idev, ~0); 219 220 /* Interrupt enable */ 221 writel(0x01, idev->base + INTERRUPT_ENABLE); 222 223 return 0; 224 } 225 226 static int i2c_m_rd(const struct i2c_msg *msg) 227 { 228 return (msg->flags & I2C_M_RD) != 0; 229 } 230 231 static int i2c_m_ten(const struct i2c_msg *msg) 232 { 233 return (msg->flags & I2C_M_TEN) != 0; 234 } 235 236 static int i2c_m_recv_len(const struct i2c_msg *msg) 237 { 238 return (msg->flags & I2C_M_RECV_LEN) != 0; 239 } 240 241 /** 242 * axxia_i2c_empty_rx_fifo - Fetch data from RX FIFO and update SMBus block 243 * transfer length if this is the first byte of such a transfer. 244 */ 245 static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev) 246 { 247 struct i2c_msg *msg = idev->msg; 248 size_t rx_fifo_avail = readl(idev->base + MST_RX_FIFO); 249 int bytes_to_transfer = min(rx_fifo_avail, msg->len - idev->msg_xfrd); 250 251 while (bytes_to_transfer-- > 0) { 252 int c = readl(idev->base + MST_DATA); 253 254 if (idev->msg_xfrd == 0 && i2c_m_recv_len(msg)) { 255 /* 256 * Check length byte for SMBus block read 257 */ 258 if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) { 259 idev->msg_err = -EPROTO; 260 i2c_int_disable(idev, ~0); 261 complete(&idev->msg_complete); 262 break; 263 } 264 msg->len = 1 + c; 265 writel(msg->len, idev->base + MST_RX_XFER); 266 } 267 msg->buf[idev->msg_xfrd++] = c; 268 } 269 270 return 0; 271 } 272 273 /** 274 * axxia_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer. 275 * @return: Number of bytes left to transfer. 276 */ 277 static int axxia_i2c_fill_tx_fifo(struct axxia_i2c_dev *idev) 278 { 279 struct i2c_msg *msg = idev->msg; 280 size_t tx_fifo_avail = FIFO_SIZE - readl(idev->base + MST_TX_FIFO); 281 int bytes_to_transfer = min(tx_fifo_avail, msg->len - idev->msg_xfrd); 282 int ret = msg->len - idev->msg_xfrd - bytes_to_transfer; 283 284 while (bytes_to_transfer-- > 0) 285 writel(msg->buf[idev->msg_xfrd++], idev->base + MST_DATA); 286 287 return ret; 288 } 289 290 static irqreturn_t axxia_i2c_isr(int irq, void *_dev) 291 { 292 struct axxia_i2c_dev *idev = _dev; 293 u32 status; 294 295 if (!(readl(idev->base + INTERRUPT_STATUS) & INT_MST)) 296 return IRQ_NONE; 297 298 /* Read interrupt status bits */ 299 status = readl(idev->base + MST_INT_STATUS); 300 301 if (!idev->msg) { 302 dev_warn(idev->dev, "unexpected interrupt\n"); 303 goto out; 304 } 305 306 /* RX FIFO needs service? */ 307 if (i2c_m_rd(idev->msg) && (status & MST_STATUS_RFL)) 308 axxia_i2c_empty_rx_fifo(idev); 309 310 /* TX FIFO needs service? 
*/ 311 if (!i2c_m_rd(idev->msg) && (status & MST_STATUS_TFL)) { 312 if (axxia_i2c_fill_tx_fifo(idev) == 0) 313 i2c_int_disable(idev, MST_STATUS_TFL); 314 } 315 316 if (status & MST_STATUS_SCC) { 317 /* Stop completed */ 318 i2c_int_disable(idev, ~0); 319 complete(&idev->msg_complete); 320 } else if (status & MST_STATUS_SNS) { 321 /* Transfer done */ 322 i2c_int_disable(idev, ~0); 323 if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len) 324 axxia_i2c_empty_rx_fifo(idev); 325 complete(&idev->msg_complete); 326 } else if (unlikely(status & MST_STATUS_ERR)) { 327 /* Transfer error */ 328 i2c_int_disable(idev, ~0); 329 if (status & MST_STATUS_AL) 330 idev->msg_err = -EAGAIN; 331 else if (status & MST_STATUS_NAK) 332 idev->msg_err = -ENXIO; 333 else 334 idev->msg_err = -EIO; 335 dev_dbg(idev->dev, "error %#x, addr=%#x rx=%u/%u tx=%u/%u\n", 336 status, 337 idev->msg->addr, 338 readl(idev->base + MST_RX_BYTES_XFRD), 339 readl(idev->base + MST_RX_XFER), 340 readl(idev->base + MST_TX_BYTES_XFRD), 341 readl(idev->base + MST_TX_XFER)); 342 complete(&idev->msg_complete); 343 } 344 345 out: 346 /* Clear interrupt */ 347 writel(INT_MST, idev->base + INTERRUPT_STATUS); 348 349 return IRQ_HANDLED; 350 } 351 352 static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg) 353 { 354 u32 int_mask = MST_STATUS_ERR | MST_STATUS_SNS; 355 u32 rx_xfer, tx_xfer; 356 u32 addr_1, addr_2; 357 unsigned long time_left; 358 359 idev->msg = msg; 360 idev->msg_xfrd = 0; 361 idev->msg_err = 0; 362 reinit_completion(&idev->msg_complete); 363 364 if (i2c_m_ten(msg)) { 365 /* 10-bit address 366 * addr_1: 5'b11110 | addr[9:8] | (R/nW) 367 * addr_2: addr[7:0] 368 */ 369 addr_1 = 0xF0 | ((msg->addr >> 7) & 0x06); 370 addr_2 = msg->addr & 0xFF; 371 } else { 372 /* 7-bit address 373 * addr_1: addr[6:0] | (R/nW) 374 * addr_2: dont care 375 */ 376 addr_1 = (msg->addr << 1) & 0xFF; 377 addr_2 = 0; 378 } 379 380 if (i2c_m_rd(msg)) { 381 /* I2C read transfer */ 382 rx_xfer = i2c_m_recv_len(msg) ? 
I2C_SMBUS_BLOCK_MAX : msg->len; 383 tx_xfer = 0; 384 addr_1 |= 1; /* Set the R/nW bit of the address */ 385 } else { 386 /* I2C write transfer */ 387 rx_xfer = 0; 388 tx_xfer = msg->len; 389 } 390 391 writel(rx_xfer, idev->base + MST_RX_XFER); 392 writel(tx_xfer, idev->base + MST_TX_XFER); 393 writel(addr_1, idev->base + MST_ADDR_1); 394 writel(addr_2, idev->base + MST_ADDR_2); 395 396 if (i2c_m_rd(msg)) 397 int_mask |= MST_STATUS_RFL; 398 else if (axxia_i2c_fill_tx_fifo(idev) != 0) 399 int_mask |= MST_STATUS_TFL; 400 401 /* Start manual mode */ 402 writel(CMD_MANUAL, idev->base + MST_COMMAND); 403 404 i2c_int_enable(idev, int_mask); 405 406 time_left = wait_for_completion_timeout(&idev->msg_complete, 407 I2C_XFER_TIMEOUT); 408 409 i2c_int_disable(idev, int_mask); 410 411 if (readl(idev->base + MST_COMMAND) & CMD_BUSY) 412 dev_warn(idev->dev, "busy after xfer\n"); 413 414 if (time_left == 0) 415 idev->msg_err = -ETIMEDOUT; 416 417 if (idev->msg_err == -ETIMEDOUT) 418 i2c_recover_bus(&idev->adapter); 419 420 if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO) 421 axxia_i2c_init(idev); 422 423 return idev->msg_err; 424 } 425 426 static int axxia_i2c_stop(struct axxia_i2c_dev *idev) 427 { 428 u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC; 429 unsigned long time_left; 430 431 reinit_completion(&idev->msg_complete); 432 433 /* Issue stop */ 434 writel(0xb, idev->base + MST_COMMAND); 435 i2c_int_enable(idev, int_mask); 436 time_left = wait_for_completion_timeout(&idev->msg_complete, 437 I2C_STOP_TIMEOUT); 438 i2c_int_disable(idev, int_mask); 439 if (time_left == 0) 440 return -ETIMEDOUT; 441 442 if (readl(idev->base + MST_COMMAND) & CMD_BUSY) 443 dev_warn(idev->dev, "busy after stop\n"); 444 445 return 0; 446 } 447 448 static int 449 axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) 450 { 451 struct axxia_i2c_dev *idev = i2c_get_adapdata(adap); 452 int i; 453 int ret = 0; 454 455 for (i = 0; ret == 0 && i < num; ++i) 456 ret = axxia_i2c_xfer_msg(idev, &msgs[i]); 457 458 axxia_i2c_stop(idev); 459 460 return ret ? 
: i; 461 } 462 463 static int axxia_i2c_get_scl(struct i2c_adapter *adap) 464 { 465 struct axxia_i2c_dev *idev = i2c_get_adapdata(adap); 466 467 return !!(readl(idev->base + I2C_BUS_MONITOR) & BM_SCLS); 468 } 469 470 static void axxia_i2c_set_scl(struct i2c_adapter *adap, int val) 471 { 472 struct axxia_i2c_dev *idev = i2c_get_adapdata(adap); 473 u32 tmp; 474 475 /* Preserve SDA Control */ 476 tmp = readl(idev->base + I2C_BUS_MONITOR) & BM_SDAC; 477 if (!val) 478 tmp |= BM_SCLC; 479 writel(tmp, idev->base + I2C_BUS_MONITOR); 480 } 481 482 static int axxia_i2c_get_sda(struct i2c_adapter *adap) 483 { 484 struct axxia_i2c_dev *idev = i2c_get_adapdata(adap); 485 486 return !!(readl(idev->base + I2C_BUS_MONITOR) & BM_SDAS); 487 } 488 489 static struct i2c_bus_recovery_info axxia_i2c_recovery_info = { 490 .recover_bus = i2c_generic_scl_recovery, 491 .get_scl = axxia_i2c_get_scl, 492 .set_scl = axxia_i2c_set_scl, 493 .get_sda = axxia_i2c_get_sda, 494 }; 495 496 static u32 axxia_i2c_func(struct i2c_adapter *adap) 497 { 498 u32 caps = (I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | 499 I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA); 500 return caps; 501 } 502 503 static const struct i2c_algorithm axxia_i2c_algo = { 504 .master_xfer = axxia_i2c_xfer, 505 .functionality = axxia_i2c_func, 506 }; 507 508 static struct i2c_adapter_quirks axxia_i2c_quirks = { 509 .max_read_len = 255, 510 .max_write_len = 255, 511 }; 512 513 static int axxia_i2c_probe(struct platform_device *pdev) 514 { 515 struct device_node *np = pdev->dev.of_node; 516 struct axxia_i2c_dev *idev = NULL; 517 struct resource *res; 518 void __iomem *base; 519 int irq; 520 int ret = 0; 521 522 idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); 523 if (!idev) 524 return -ENOMEM; 525 526 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 527 base = devm_ioremap_resource(&pdev->dev, res); 528 if (IS_ERR(base)) 529 return PTR_ERR(base); 530 531 irq = platform_get_irq(pdev, 0); 532 if (irq < 0) { 533 dev_err(&pdev->dev, "missing interrupt resource\n"); 534 return irq; 535 } 536 537 idev->i2c_clk = devm_clk_get(&pdev->dev, "i2c"); 538 if (IS_ERR(idev->i2c_clk)) { 539 dev_err(&pdev->dev, "missing clock\n"); 540 return PTR_ERR(idev->i2c_clk); 541 } 542 543 idev->base = base; 544 idev->dev = &pdev->dev; 545 init_completion(&idev->msg_complete); 546 547 of_property_read_u32(np, "clock-frequency", &idev->bus_clk_rate); 548 if (idev->bus_clk_rate == 0) 549 idev->bus_clk_rate = 100000; /* default clock rate */ 550 551 ret = axxia_i2c_init(idev); 552 if (ret) { 553 dev_err(&pdev->dev, "failed to initialize\n"); 554 return ret; 555 } 556 557 ret = devm_request_irq(&pdev->dev, irq, axxia_i2c_isr, 0, 558 pdev->name, idev); 559 if (ret) { 560 dev_err(&pdev->dev, "failed to claim IRQ%d\n", irq); 561 return ret; 562 } 563 564 clk_prepare_enable(idev->i2c_clk); 565 566 i2c_set_adapdata(&idev->adapter, idev); 567 strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name)); 568 idev->adapter.owner = THIS_MODULE; 569 idev->adapter.algo = &axxia_i2c_algo; 570 idev->adapter.bus_recovery_info = &axxia_i2c_recovery_info; 571 idev->adapter.quirks = &axxia_i2c_quirks; 572 idev->adapter.dev.parent = &pdev->dev; 573 idev->adapter.dev.of_node = pdev->dev.of_node; 574 575 platform_set_drvdata(pdev, idev); 576 577 ret = i2c_add_adapter(&idev->adapter); 578 if (ret) { 579 dev_err(&pdev->dev, "failed to add adapter\n"); 580 return ret; 581 } 582 583 return 0; 584 } 585 586 static int axxia_i2c_remove(struct platform_device *pdev) 587 { 588 struct axxia_i2c_dev 
*idev = platform_get_drvdata(pdev); 589 590 clk_disable_unprepare(idev->i2c_clk); 591 i2c_del_adapter(&idev->adapter); 592 593 return 0; 594 } 595 596 /* Match table for of_platform binding */ 597 static const struct of_device_id axxia_i2c_of_match[] = { 598 { .compatible = "lsi,api2c", }, 599 {}, 600 }; 601 602 MODULE_DEVICE_TABLE(of, axxia_i2c_of_match); 603 604 static struct platform_driver axxia_i2c_driver = { 605 .probe = axxia_i2c_probe, 606 .remove = axxia_i2c_remove, 607 .driver = { 608 .name = "axxia-i2c", 609 .of_match_table = axxia_i2c_of_match, 610 }, 611 }; 612 613 module_platform_driver(axxia_i2c_driver); 614 615 MODULE_DESCRIPTION("Axxia I2C Bus driver"); 616 MODULE_AUTHOR("Anders Berg <anders.berg@lsi.com>"); 617 MODULE_LICENSE("GPL v2"); 618 619 620 621 622 623 /* LDV_COMMENT_BEGIN_MAIN */ 624 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful 625 626 /*###########################################################################*/ 627 628 /*############## Driver Environment Generator 0.2 output ####################*/ 629 630 /*###########################################################################*/ 631 632 633 634 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */ 635 void ldv_check_final_state(void); 636 637 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */ 638 void ldv_check_return_value(int res); 639 640 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */ 641 void ldv_check_return_value_probe(int res); 642 643 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 644 void ldv_initialize(void); 645 646 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 647 void ldv_handler_precall(void); 648 649 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */ 650 int nondet_int(void); 651 652 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */ 653 int LDV_IN_INTERRUPT; 654 655 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. 
*/ 656 void ldv_main0_sequence_infinite_withcheck_stateful(void) { 657 658 659 660 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 661 /*============================= VARIABLE DECLARATION PART =============================*/ 662 /** STRUCT: struct type: i2c_bus_recovery_info, struct name: axxia_i2c_recovery_info **/ 663 /* content: static int axxia_i2c_get_scl(struct i2c_adapter *adap)*/ 664 /* LDV_COMMENT_BEGIN_PREP */ 665 #define SCL_WAIT_TIMEOUT_NS 25000000 666 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 667 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 668 #define FIFO_SIZE 8 669 #define GLOBAL_CONTROL 0x00 670 #define GLOBAL_MST_EN BIT(0) 671 #define GLOBAL_SLV_EN BIT(1) 672 #define GLOBAL_IBML_EN BIT(2) 673 #define INTERRUPT_STATUS 0x04 674 #define INTERRUPT_ENABLE 0x08 675 #define INT_SLV BIT(1) 676 #define INT_MST BIT(0) 677 #define WAIT_TIMER_CONTROL 0x0c 678 #define WT_EN BIT(15) 679 #define WT_VALUE(_x) ((_x) & 0x7fff) 680 #define IBML_TIMEOUT 0x10 681 #define IBML_LOW_MEXT 0x14 682 #define IBML_LOW_SEXT 0x18 683 #define TIMER_CLOCK_DIV 0x1c 684 #define I2C_BUS_MONITOR 0x20 685 #define BM_SDAC BIT(3) 686 #define BM_SCLC BIT(2) 687 #define BM_SDAS BIT(1) 688 #define BM_SCLS BIT(0) 689 #define SOFT_RESET 0x24 690 #define MST_COMMAND 0x28 691 #define CMD_BUSY (1<<3) 692 #define CMD_MANUAL (0x00 | CMD_BUSY) 693 #define CMD_AUTO (0x01 | CMD_BUSY) 694 #define MST_RX_XFER 0x2c 695 #define MST_TX_XFER 0x30 696 #define MST_ADDR_1 0x34 697 #define MST_ADDR_2 0x38 698 #define MST_DATA 0x3c 699 #define MST_TX_FIFO 0x40 700 #define MST_RX_FIFO 0x44 701 #define MST_INT_ENABLE 0x48 702 #define MST_INT_STATUS 0x4c 703 #define MST_STATUS_RFL (1 << 13) 704 #define MST_STATUS_TFL (1 << 12) 705 #define MST_STATUS_SNS (1 << 11) 706 #define MST_STATUS_SS (1 << 10) 707 #define MST_STATUS_SCC (1 << 9) 708 #define MST_STATUS_IP (1 << 8) 709 #define MST_STATUS_TSS (1 << 7) 710 #define MST_STATUS_AL (1 << 6) 711 #define MST_STATUS_ND (1 << 5) 712 #define MST_STATUS_NA (1 << 4) 713 #define MST_STATUS_NAK (MST_STATUS_NA | \ 714 MST_STATUS_ND) 715 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 716 MST_STATUS_AL | \ 717 MST_STATUS_IP | \ 718 MST_STATUS_TSS) 719 #define MST_TX_BYTES_XFRD 0x50 720 #define MST_RX_BYTES_XFRD 0x54 721 #define SCL_HIGH_PERIOD 0x80 722 #define SCL_LOW_PERIOD 0x84 723 #define SPIKE_FLTR_LEN 0x88 724 #define SDA_SETUP_TIME 0x8c 725 #define SDA_HOLD_TIME 0x90 726 /* LDV_COMMENT_END_PREP */ 727 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "axxia_i2c_get_scl" */ 728 struct i2c_adapter * var_group1; 729 /* content: static void axxia_i2c_set_scl(struct i2c_adapter *adap, int val)*/ 730 /* LDV_COMMENT_BEGIN_PREP */ 731 #define SCL_WAIT_TIMEOUT_NS 25000000 732 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 733 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 734 #define FIFO_SIZE 8 735 #define GLOBAL_CONTROL 0x00 736 #define GLOBAL_MST_EN BIT(0) 737 #define GLOBAL_SLV_EN BIT(1) 738 #define GLOBAL_IBML_EN BIT(2) 739 #define INTERRUPT_STATUS 0x04 740 #define INTERRUPT_ENABLE 0x08 741 #define INT_SLV BIT(1) 742 #define INT_MST BIT(0) 743 #define WAIT_TIMER_CONTROL 0x0c 744 #define WT_EN BIT(15) 745 #define WT_VALUE(_x) ((_x) & 0x7fff) 746 #define IBML_TIMEOUT 0x10 747 #define IBML_LOW_MEXT 0x14 748 #define IBML_LOW_SEXT 0x18 749 #define TIMER_CLOCK_DIV 0x1c 750 #define I2C_BUS_MONITOR 0x20 751 #define BM_SDAC BIT(3) 752 #define BM_SCLC BIT(2) 753 #define BM_SDAS BIT(1) 754 #define BM_SCLS BIT(0) 755 #define SOFT_RESET 0x24 756 #define MST_COMMAND 0x28 757 #define 
CMD_BUSY (1<<3) 758 #define CMD_MANUAL (0x00 | CMD_BUSY) 759 #define CMD_AUTO (0x01 | CMD_BUSY) 760 #define MST_RX_XFER 0x2c 761 #define MST_TX_XFER 0x30 762 #define MST_ADDR_1 0x34 763 #define MST_ADDR_2 0x38 764 #define MST_DATA 0x3c 765 #define MST_TX_FIFO 0x40 766 #define MST_RX_FIFO 0x44 767 #define MST_INT_ENABLE 0x48 768 #define MST_INT_STATUS 0x4c 769 #define MST_STATUS_RFL (1 << 13) 770 #define MST_STATUS_TFL (1 << 12) 771 #define MST_STATUS_SNS (1 << 11) 772 #define MST_STATUS_SS (1 << 10) 773 #define MST_STATUS_SCC (1 << 9) 774 #define MST_STATUS_IP (1 << 8) 775 #define MST_STATUS_TSS (1 << 7) 776 #define MST_STATUS_AL (1 << 6) 777 #define MST_STATUS_ND (1 << 5) 778 #define MST_STATUS_NA (1 << 4) 779 #define MST_STATUS_NAK (MST_STATUS_NA | \ 780 MST_STATUS_ND) 781 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 782 MST_STATUS_AL | \ 783 MST_STATUS_IP | \ 784 MST_STATUS_TSS) 785 #define MST_TX_BYTES_XFRD 0x50 786 #define MST_RX_BYTES_XFRD 0x54 787 #define SCL_HIGH_PERIOD 0x80 788 #define SCL_LOW_PERIOD 0x84 789 #define SPIKE_FLTR_LEN 0x88 790 #define SDA_SETUP_TIME 0x8c 791 #define SDA_HOLD_TIME 0x90 792 /* LDV_COMMENT_END_PREP */ 793 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "axxia_i2c_set_scl" */ 794 int var_axxia_i2c_set_scl_13_p1; 795 /* content: static int axxia_i2c_get_sda(struct i2c_adapter *adap)*/ 796 /* LDV_COMMENT_BEGIN_PREP */ 797 #define SCL_WAIT_TIMEOUT_NS 25000000 798 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 799 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 800 #define FIFO_SIZE 8 801 #define GLOBAL_CONTROL 0x00 802 #define GLOBAL_MST_EN BIT(0) 803 #define GLOBAL_SLV_EN BIT(1) 804 #define GLOBAL_IBML_EN BIT(2) 805 #define INTERRUPT_STATUS 0x04 806 #define INTERRUPT_ENABLE 0x08 807 #define INT_SLV BIT(1) 808 #define INT_MST BIT(0) 809 #define WAIT_TIMER_CONTROL 0x0c 810 #define WT_EN BIT(15) 811 #define WT_VALUE(_x) ((_x) & 0x7fff) 812 #define IBML_TIMEOUT 0x10 813 #define IBML_LOW_MEXT 0x14 814 #define IBML_LOW_SEXT 0x18 815 #define TIMER_CLOCK_DIV 0x1c 816 #define I2C_BUS_MONITOR 0x20 817 #define BM_SDAC BIT(3) 818 #define BM_SCLC BIT(2) 819 #define BM_SDAS BIT(1) 820 #define BM_SCLS BIT(0) 821 #define SOFT_RESET 0x24 822 #define MST_COMMAND 0x28 823 #define CMD_BUSY (1<<3) 824 #define CMD_MANUAL (0x00 | CMD_BUSY) 825 #define CMD_AUTO (0x01 | CMD_BUSY) 826 #define MST_RX_XFER 0x2c 827 #define MST_TX_XFER 0x30 828 #define MST_ADDR_1 0x34 829 #define MST_ADDR_2 0x38 830 #define MST_DATA 0x3c 831 #define MST_TX_FIFO 0x40 832 #define MST_RX_FIFO 0x44 833 #define MST_INT_ENABLE 0x48 834 #define MST_INT_STATUS 0x4c 835 #define MST_STATUS_RFL (1 << 13) 836 #define MST_STATUS_TFL (1 << 12) 837 #define MST_STATUS_SNS (1 << 11) 838 #define MST_STATUS_SS (1 << 10) 839 #define MST_STATUS_SCC (1 << 9) 840 #define MST_STATUS_IP (1 << 8) 841 #define MST_STATUS_TSS (1 << 7) 842 #define MST_STATUS_AL (1 << 6) 843 #define MST_STATUS_ND (1 << 5) 844 #define MST_STATUS_NA (1 << 4) 845 #define MST_STATUS_NAK (MST_STATUS_NA | \ 846 MST_STATUS_ND) 847 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 848 MST_STATUS_AL | \ 849 MST_STATUS_IP | \ 850 MST_STATUS_TSS) 851 #define MST_TX_BYTES_XFRD 0x50 852 #define MST_RX_BYTES_XFRD 0x54 853 #define SCL_HIGH_PERIOD 0x80 854 #define SCL_LOW_PERIOD 0x84 855 #define SPIKE_FLTR_LEN 0x88 856 #define SDA_SETUP_TIME 0x8c 857 #define SDA_HOLD_TIME 0x90 858 /* LDV_COMMENT_END_PREP */ 859 860 /** STRUCT: struct type: i2c_algorithm, struct name: axxia_i2c_algo **/ 861 /* content: static u32 axxia_i2c_func(struct i2c_adapter *adap)*/ 
862 /* LDV_COMMENT_BEGIN_PREP */ 863 #define SCL_WAIT_TIMEOUT_NS 25000000 864 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 865 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 866 #define FIFO_SIZE 8 867 #define GLOBAL_CONTROL 0x00 868 #define GLOBAL_MST_EN BIT(0) 869 #define GLOBAL_SLV_EN BIT(1) 870 #define GLOBAL_IBML_EN BIT(2) 871 #define INTERRUPT_STATUS 0x04 872 #define INTERRUPT_ENABLE 0x08 873 #define INT_SLV BIT(1) 874 #define INT_MST BIT(0) 875 #define WAIT_TIMER_CONTROL 0x0c 876 #define WT_EN BIT(15) 877 #define WT_VALUE(_x) ((_x) & 0x7fff) 878 #define IBML_TIMEOUT 0x10 879 #define IBML_LOW_MEXT 0x14 880 #define IBML_LOW_SEXT 0x18 881 #define TIMER_CLOCK_DIV 0x1c 882 #define I2C_BUS_MONITOR 0x20 883 #define BM_SDAC BIT(3) 884 #define BM_SCLC BIT(2) 885 #define BM_SDAS BIT(1) 886 #define BM_SCLS BIT(0) 887 #define SOFT_RESET 0x24 888 #define MST_COMMAND 0x28 889 #define CMD_BUSY (1<<3) 890 #define CMD_MANUAL (0x00 | CMD_BUSY) 891 #define CMD_AUTO (0x01 | CMD_BUSY) 892 #define MST_RX_XFER 0x2c 893 #define MST_TX_XFER 0x30 894 #define MST_ADDR_1 0x34 895 #define MST_ADDR_2 0x38 896 #define MST_DATA 0x3c 897 #define MST_TX_FIFO 0x40 898 #define MST_RX_FIFO 0x44 899 #define MST_INT_ENABLE 0x48 900 #define MST_INT_STATUS 0x4c 901 #define MST_STATUS_RFL (1 << 13) 902 #define MST_STATUS_TFL (1 << 12) 903 #define MST_STATUS_SNS (1 << 11) 904 #define MST_STATUS_SS (1 << 10) 905 #define MST_STATUS_SCC (1 << 9) 906 #define MST_STATUS_IP (1 << 8) 907 #define MST_STATUS_TSS (1 << 7) 908 #define MST_STATUS_AL (1 << 6) 909 #define MST_STATUS_ND (1 << 5) 910 #define MST_STATUS_NA (1 << 4) 911 #define MST_STATUS_NAK (MST_STATUS_NA | \ 912 MST_STATUS_ND) 913 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 914 MST_STATUS_AL | \ 915 MST_STATUS_IP | \ 916 MST_STATUS_TSS) 917 #define MST_TX_BYTES_XFRD 0x50 918 #define MST_RX_BYTES_XFRD 0x54 919 #define SCL_HIGH_PERIOD 0x80 920 #define SCL_LOW_PERIOD 0x84 921 #define SPIKE_FLTR_LEN 0x88 922 #define SDA_SETUP_TIME 0x8c 923 #define SDA_HOLD_TIME 0x90 924 /* LDV_COMMENT_END_PREP */ 925 926 /** STRUCT: struct type: platform_driver, struct name: axxia_i2c_driver **/ 927 /* content: static int axxia_i2c_probe(struct platform_device *pdev)*/ 928 /* LDV_COMMENT_BEGIN_PREP */ 929 #define SCL_WAIT_TIMEOUT_NS 25000000 930 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 931 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 932 #define FIFO_SIZE 8 933 #define GLOBAL_CONTROL 0x00 934 #define GLOBAL_MST_EN BIT(0) 935 #define GLOBAL_SLV_EN BIT(1) 936 #define GLOBAL_IBML_EN BIT(2) 937 #define INTERRUPT_STATUS 0x04 938 #define INTERRUPT_ENABLE 0x08 939 #define INT_SLV BIT(1) 940 #define INT_MST BIT(0) 941 #define WAIT_TIMER_CONTROL 0x0c 942 #define WT_EN BIT(15) 943 #define WT_VALUE(_x) ((_x) & 0x7fff) 944 #define IBML_TIMEOUT 0x10 945 #define IBML_LOW_MEXT 0x14 946 #define IBML_LOW_SEXT 0x18 947 #define TIMER_CLOCK_DIV 0x1c 948 #define I2C_BUS_MONITOR 0x20 949 #define BM_SDAC BIT(3) 950 #define BM_SCLC BIT(2) 951 #define BM_SDAS BIT(1) 952 #define BM_SCLS BIT(0) 953 #define SOFT_RESET 0x24 954 #define MST_COMMAND 0x28 955 #define CMD_BUSY (1<<3) 956 #define CMD_MANUAL (0x00 | CMD_BUSY) 957 #define CMD_AUTO (0x01 | CMD_BUSY) 958 #define MST_RX_XFER 0x2c 959 #define MST_TX_XFER 0x30 960 #define MST_ADDR_1 0x34 961 #define MST_ADDR_2 0x38 962 #define MST_DATA 0x3c 963 #define MST_TX_FIFO 0x40 964 #define MST_RX_FIFO 0x44 965 #define MST_INT_ENABLE 0x48 966 #define MST_INT_STATUS 0x4c 967 #define MST_STATUS_RFL (1 << 13) 968 #define MST_STATUS_TFL (1 << 12) 969 #define 
MST_STATUS_SNS (1 << 11) 970 #define MST_STATUS_SS (1 << 10) 971 #define MST_STATUS_SCC (1 << 9) 972 #define MST_STATUS_IP (1 << 8) 973 #define MST_STATUS_TSS (1 << 7) 974 #define MST_STATUS_AL (1 << 6) 975 #define MST_STATUS_ND (1 << 5) 976 #define MST_STATUS_NA (1 << 4) 977 #define MST_STATUS_NAK (MST_STATUS_NA | \ 978 MST_STATUS_ND) 979 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 980 MST_STATUS_AL | \ 981 MST_STATUS_IP | \ 982 MST_STATUS_TSS) 983 #define MST_TX_BYTES_XFRD 0x50 984 #define MST_RX_BYTES_XFRD 0x54 985 #define SCL_HIGH_PERIOD 0x80 986 #define SCL_LOW_PERIOD 0x84 987 #define SPIKE_FLTR_LEN 0x88 988 #define SDA_SETUP_TIME 0x8c 989 #define SDA_HOLD_TIME 0x90 990 /* LDV_COMMENT_END_PREP */ 991 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "axxia_i2c_probe" */ 992 struct platform_device * var_group2; 993 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "axxia_i2c_probe" */ 994 static int res_axxia_i2c_probe_16; 995 /* content: static int axxia_i2c_remove(struct platform_device *pdev)*/ 996 /* LDV_COMMENT_BEGIN_PREP */ 997 #define SCL_WAIT_TIMEOUT_NS 25000000 998 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 999 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1000 #define FIFO_SIZE 8 1001 #define GLOBAL_CONTROL 0x00 1002 #define GLOBAL_MST_EN BIT(0) 1003 #define GLOBAL_SLV_EN BIT(1) 1004 #define GLOBAL_IBML_EN BIT(2) 1005 #define INTERRUPT_STATUS 0x04 1006 #define INTERRUPT_ENABLE 0x08 1007 #define INT_SLV BIT(1) 1008 #define INT_MST BIT(0) 1009 #define WAIT_TIMER_CONTROL 0x0c 1010 #define WT_EN BIT(15) 1011 #define WT_VALUE(_x) ((_x) & 0x7fff) 1012 #define IBML_TIMEOUT 0x10 1013 #define IBML_LOW_MEXT 0x14 1014 #define IBML_LOW_SEXT 0x18 1015 #define TIMER_CLOCK_DIV 0x1c 1016 #define I2C_BUS_MONITOR 0x20 1017 #define BM_SDAC BIT(3) 1018 #define BM_SCLC BIT(2) 1019 #define BM_SDAS BIT(1) 1020 #define BM_SCLS BIT(0) 1021 #define SOFT_RESET 0x24 1022 #define MST_COMMAND 0x28 1023 #define CMD_BUSY (1<<3) 1024 #define CMD_MANUAL (0x00 | CMD_BUSY) 1025 #define CMD_AUTO (0x01 | CMD_BUSY) 1026 #define MST_RX_XFER 0x2c 1027 #define MST_TX_XFER 0x30 1028 #define MST_ADDR_1 0x34 1029 #define MST_ADDR_2 0x38 1030 #define MST_DATA 0x3c 1031 #define MST_TX_FIFO 0x40 1032 #define MST_RX_FIFO 0x44 1033 #define MST_INT_ENABLE 0x48 1034 #define MST_INT_STATUS 0x4c 1035 #define MST_STATUS_RFL (1 << 13) 1036 #define MST_STATUS_TFL (1 << 12) 1037 #define MST_STATUS_SNS (1 << 11) 1038 #define MST_STATUS_SS (1 << 10) 1039 #define MST_STATUS_SCC (1 << 9) 1040 #define MST_STATUS_IP (1 << 8) 1041 #define MST_STATUS_TSS (1 << 7) 1042 #define MST_STATUS_AL (1 << 6) 1043 #define MST_STATUS_ND (1 << 5) 1044 #define MST_STATUS_NA (1 << 4) 1045 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1046 MST_STATUS_ND) 1047 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1048 MST_STATUS_AL | \ 1049 MST_STATUS_IP | \ 1050 MST_STATUS_TSS) 1051 #define MST_TX_BYTES_XFRD 0x50 1052 #define MST_RX_BYTES_XFRD 0x54 1053 #define SCL_HIGH_PERIOD 0x80 1054 #define SCL_LOW_PERIOD 0x84 1055 #define SPIKE_FLTR_LEN 0x88 1056 #define SDA_SETUP_TIME 0x8c 1057 #define SDA_HOLD_TIME 0x90 1058 /* LDV_COMMENT_END_PREP */ 1059 1060 /** CALLBACK SECTION request_irq **/ 1061 /* content: static irqreturn_t axxia_i2c_isr(int irq, void *_dev)*/ 1062 /* LDV_COMMENT_BEGIN_PREP */ 1063 #define SCL_WAIT_TIMEOUT_NS 25000000 1064 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1065 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1066 #define FIFO_SIZE 8 1067 #define GLOBAL_CONTROL 0x00 1068 #define 
GLOBAL_MST_EN BIT(0) 1069 #define GLOBAL_SLV_EN BIT(1) 1070 #define GLOBAL_IBML_EN BIT(2) 1071 #define INTERRUPT_STATUS 0x04 1072 #define INTERRUPT_ENABLE 0x08 1073 #define INT_SLV BIT(1) 1074 #define INT_MST BIT(0) 1075 #define WAIT_TIMER_CONTROL 0x0c 1076 #define WT_EN BIT(15) 1077 #define WT_VALUE(_x) ((_x) & 0x7fff) 1078 #define IBML_TIMEOUT 0x10 1079 #define IBML_LOW_MEXT 0x14 1080 #define IBML_LOW_SEXT 0x18 1081 #define TIMER_CLOCK_DIV 0x1c 1082 #define I2C_BUS_MONITOR 0x20 1083 #define BM_SDAC BIT(3) 1084 #define BM_SCLC BIT(2) 1085 #define BM_SDAS BIT(1) 1086 #define BM_SCLS BIT(0) 1087 #define SOFT_RESET 0x24 1088 #define MST_COMMAND 0x28 1089 #define CMD_BUSY (1<<3) 1090 #define CMD_MANUAL (0x00 | CMD_BUSY) 1091 #define CMD_AUTO (0x01 | CMD_BUSY) 1092 #define MST_RX_XFER 0x2c 1093 #define MST_TX_XFER 0x30 1094 #define MST_ADDR_1 0x34 1095 #define MST_ADDR_2 0x38 1096 #define MST_DATA 0x3c 1097 #define MST_TX_FIFO 0x40 1098 #define MST_RX_FIFO 0x44 1099 #define MST_INT_ENABLE 0x48 1100 #define MST_INT_STATUS 0x4c 1101 #define MST_STATUS_RFL (1 << 13) 1102 #define MST_STATUS_TFL (1 << 12) 1103 #define MST_STATUS_SNS (1 << 11) 1104 #define MST_STATUS_SS (1 << 10) 1105 #define MST_STATUS_SCC (1 << 9) 1106 #define MST_STATUS_IP (1 << 8) 1107 #define MST_STATUS_TSS (1 << 7) 1108 #define MST_STATUS_AL (1 << 6) 1109 #define MST_STATUS_ND (1 << 5) 1110 #define MST_STATUS_NA (1 << 4) 1111 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1112 MST_STATUS_ND) 1113 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1114 MST_STATUS_AL | \ 1115 MST_STATUS_IP | \ 1116 MST_STATUS_TSS) 1117 #define MST_TX_BYTES_XFRD 0x50 1118 #define MST_RX_BYTES_XFRD 0x54 1119 #define SCL_HIGH_PERIOD 0x80 1120 #define SCL_LOW_PERIOD 0x84 1121 #define SPIKE_FLTR_LEN 0x88 1122 #define SDA_SETUP_TIME 0x8c 1123 #define SDA_HOLD_TIME 0x90 1124 /* LDV_COMMENT_END_PREP */ 1125 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "axxia_i2c_isr" */ 1126 int var_axxia_i2c_isr_9_p0; 1127 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "axxia_i2c_isr" */ 1128 void * var_axxia_i2c_isr_9_p1; 1129 1130 1131 1132 1133 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */ 1134 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */ 1135 /*============================= VARIABLE INITIALIZING PART =============================*/ 1136 LDV_IN_INTERRUPT=1; 1137 1138 1139 1140 1141 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */ 1142 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */ 1143 /*============================= FUNCTION CALL SECTION =============================*/ 1144 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. 
*/ 1145 ldv_initialize(); 1146 1147 1148 1149 1150 int ldv_s_axxia_i2c_driver_platform_driver = 0; 1151 1152 1153 1154 1155 while( nondet_int() 1156 || !(ldv_s_axxia_i2c_driver_platform_driver == 0) 1157 ) { 1158 1159 switch(nondet_int()) { 1160 1161 case 0: { 1162 1163 /** STRUCT: struct type: i2c_bus_recovery_info, struct name: axxia_i2c_recovery_info **/ 1164 1165 1166 /* content: static int axxia_i2c_get_scl(struct i2c_adapter *adap)*/ 1167 /* LDV_COMMENT_BEGIN_PREP */ 1168 #define SCL_WAIT_TIMEOUT_NS 25000000 1169 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1170 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1171 #define FIFO_SIZE 8 1172 #define GLOBAL_CONTROL 0x00 1173 #define GLOBAL_MST_EN BIT(0) 1174 #define GLOBAL_SLV_EN BIT(1) 1175 #define GLOBAL_IBML_EN BIT(2) 1176 #define INTERRUPT_STATUS 0x04 1177 #define INTERRUPT_ENABLE 0x08 1178 #define INT_SLV BIT(1) 1179 #define INT_MST BIT(0) 1180 #define WAIT_TIMER_CONTROL 0x0c 1181 #define WT_EN BIT(15) 1182 #define WT_VALUE(_x) ((_x) & 0x7fff) 1183 #define IBML_TIMEOUT 0x10 1184 #define IBML_LOW_MEXT 0x14 1185 #define IBML_LOW_SEXT 0x18 1186 #define TIMER_CLOCK_DIV 0x1c 1187 #define I2C_BUS_MONITOR 0x20 1188 #define BM_SDAC BIT(3) 1189 #define BM_SCLC BIT(2) 1190 #define BM_SDAS BIT(1) 1191 #define BM_SCLS BIT(0) 1192 #define SOFT_RESET 0x24 1193 #define MST_COMMAND 0x28 1194 #define CMD_BUSY (1<<3) 1195 #define CMD_MANUAL (0x00 | CMD_BUSY) 1196 #define CMD_AUTO (0x01 | CMD_BUSY) 1197 #define MST_RX_XFER 0x2c 1198 #define MST_TX_XFER 0x30 1199 #define MST_ADDR_1 0x34 1200 #define MST_ADDR_2 0x38 1201 #define MST_DATA 0x3c 1202 #define MST_TX_FIFO 0x40 1203 #define MST_RX_FIFO 0x44 1204 #define MST_INT_ENABLE 0x48 1205 #define MST_INT_STATUS 0x4c 1206 #define MST_STATUS_RFL (1 << 13) 1207 #define MST_STATUS_TFL (1 << 12) 1208 #define MST_STATUS_SNS (1 << 11) 1209 #define MST_STATUS_SS (1 << 10) 1210 #define MST_STATUS_SCC (1 << 9) 1211 #define MST_STATUS_IP (1 << 8) 1212 #define MST_STATUS_TSS (1 << 7) 1213 #define MST_STATUS_AL (1 << 6) 1214 #define MST_STATUS_ND (1 << 5) 1215 #define MST_STATUS_NA (1 << 4) 1216 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1217 MST_STATUS_ND) 1218 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1219 MST_STATUS_AL | \ 1220 MST_STATUS_IP | \ 1221 MST_STATUS_TSS) 1222 #define MST_TX_BYTES_XFRD 0x50 1223 #define MST_RX_BYTES_XFRD 0x54 1224 #define SCL_HIGH_PERIOD 0x80 1225 #define SCL_LOW_PERIOD 0x84 1226 #define SPIKE_FLTR_LEN 0x88 1227 #define SDA_SETUP_TIME 0x8c 1228 #define SDA_HOLD_TIME 0x90 1229 /* LDV_COMMENT_END_PREP */ 1230 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_scl" from driver structure with callbacks "axxia_i2c_recovery_info" */ 1231 ldv_handler_precall(); 1232 axxia_i2c_get_scl( var_group1); 1233 1234 1235 1236 1237 } 1238 1239 break; 1240 case 1: { 1241 1242 /** STRUCT: struct type: i2c_bus_recovery_info, struct name: axxia_i2c_recovery_info **/ 1243 1244 1245 /* content: static void axxia_i2c_set_scl(struct i2c_adapter *adap, int val)*/ 1246 /* LDV_COMMENT_BEGIN_PREP */ 1247 #define SCL_WAIT_TIMEOUT_NS 25000000 1248 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1249 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1250 #define FIFO_SIZE 8 1251 #define GLOBAL_CONTROL 0x00 1252 #define GLOBAL_MST_EN BIT(0) 1253 #define GLOBAL_SLV_EN BIT(1) 1254 #define GLOBAL_IBML_EN BIT(2) 1255 #define INTERRUPT_STATUS 0x04 1256 #define INTERRUPT_ENABLE 0x08 1257 #define INT_SLV BIT(1) 1258 #define INT_MST BIT(0) 1259 #define WAIT_TIMER_CONTROL 0x0c 1260 #define WT_EN BIT(15) 1261 #define 
WT_VALUE(_x) ((_x) & 0x7fff) 1262 #define IBML_TIMEOUT 0x10 1263 #define IBML_LOW_MEXT 0x14 1264 #define IBML_LOW_SEXT 0x18 1265 #define TIMER_CLOCK_DIV 0x1c 1266 #define I2C_BUS_MONITOR 0x20 1267 #define BM_SDAC BIT(3) 1268 #define BM_SCLC BIT(2) 1269 #define BM_SDAS BIT(1) 1270 #define BM_SCLS BIT(0) 1271 #define SOFT_RESET 0x24 1272 #define MST_COMMAND 0x28 1273 #define CMD_BUSY (1<<3) 1274 #define CMD_MANUAL (0x00 | CMD_BUSY) 1275 #define CMD_AUTO (0x01 | CMD_BUSY) 1276 #define MST_RX_XFER 0x2c 1277 #define MST_TX_XFER 0x30 1278 #define MST_ADDR_1 0x34 1279 #define MST_ADDR_2 0x38 1280 #define MST_DATA 0x3c 1281 #define MST_TX_FIFO 0x40 1282 #define MST_RX_FIFO 0x44 1283 #define MST_INT_ENABLE 0x48 1284 #define MST_INT_STATUS 0x4c 1285 #define MST_STATUS_RFL (1 << 13) 1286 #define MST_STATUS_TFL (1 << 12) 1287 #define MST_STATUS_SNS (1 << 11) 1288 #define MST_STATUS_SS (1 << 10) 1289 #define MST_STATUS_SCC (1 << 9) 1290 #define MST_STATUS_IP (1 << 8) 1291 #define MST_STATUS_TSS (1 << 7) 1292 #define MST_STATUS_AL (1 << 6) 1293 #define MST_STATUS_ND (1 << 5) 1294 #define MST_STATUS_NA (1 << 4) 1295 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1296 MST_STATUS_ND) 1297 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1298 MST_STATUS_AL | \ 1299 MST_STATUS_IP | \ 1300 MST_STATUS_TSS) 1301 #define MST_TX_BYTES_XFRD 0x50 1302 #define MST_RX_BYTES_XFRD 0x54 1303 #define SCL_HIGH_PERIOD 0x80 1304 #define SCL_LOW_PERIOD 0x84 1305 #define SPIKE_FLTR_LEN 0x88 1306 #define SDA_SETUP_TIME 0x8c 1307 #define SDA_HOLD_TIME 0x90 1308 /* LDV_COMMENT_END_PREP */ 1309 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_scl" from driver structure with callbacks "axxia_i2c_recovery_info" */ 1310 ldv_handler_precall(); 1311 axxia_i2c_set_scl( var_group1, var_axxia_i2c_set_scl_13_p1); 1312 1313 1314 1315 1316 } 1317 1318 break; 1319 case 2: { 1320 1321 /** STRUCT: struct type: i2c_bus_recovery_info, struct name: axxia_i2c_recovery_info **/ 1322 1323 1324 /* content: static int axxia_i2c_get_sda(struct i2c_adapter *adap)*/ 1325 /* LDV_COMMENT_BEGIN_PREP */ 1326 #define SCL_WAIT_TIMEOUT_NS 25000000 1327 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1328 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1329 #define FIFO_SIZE 8 1330 #define GLOBAL_CONTROL 0x00 1331 #define GLOBAL_MST_EN BIT(0) 1332 #define GLOBAL_SLV_EN BIT(1) 1333 #define GLOBAL_IBML_EN BIT(2) 1334 #define INTERRUPT_STATUS 0x04 1335 #define INTERRUPT_ENABLE 0x08 1336 #define INT_SLV BIT(1) 1337 #define INT_MST BIT(0) 1338 #define WAIT_TIMER_CONTROL 0x0c 1339 #define WT_EN BIT(15) 1340 #define WT_VALUE(_x) ((_x) & 0x7fff) 1341 #define IBML_TIMEOUT 0x10 1342 #define IBML_LOW_MEXT 0x14 1343 #define IBML_LOW_SEXT 0x18 1344 #define TIMER_CLOCK_DIV 0x1c 1345 #define I2C_BUS_MONITOR 0x20 1346 #define BM_SDAC BIT(3) 1347 #define BM_SCLC BIT(2) 1348 #define BM_SDAS BIT(1) 1349 #define BM_SCLS BIT(0) 1350 #define SOFT_RESET 0x24 1351 #define MST_COMMAND 0x28 1352 #define CMD_BUSY (1<<3) 1353 #define CMD_MANUAL (0x00 | CMD_BUSY) 1354 #define CMD_AUTO (0x01 | CMD_BUSY) 1355 #define MST_RX_XFER 0x2c 1356 #define MST_TX_XFER 0x30 1357 #define MST_ADDR_1 0x34 1358 #define MST_ADDR_2 0x38 1359 #define MST_DATA 0x3c 1360 #define MST_TX_FIFO 0x40 1361 #define MST_RX_FIFO 0x44 1362 #define MST_INT_ENABLE 0x48 1363 #define MST_INT_STATUS 0x4c 1364 #define MST_STATUS_RFL (1 << 13) 1365 #define MST_STATUS_TFL (1 << 12) 1366 #define MST_STATUS_SNS (1 << 11) 1367 #define MST_STATUS_SS (1 << 10) 1368 #define MST_STATUS_SCC (1 << 9) 1369 #define MST_STATUS_IP (1 << 8) 1370 
#define MST_STATUS_TSS (1 << 7) 1371 #define MST_STATUS_AL (1 << 6) 1372 #define MST_STATUS_ND (1 << 5) 1373 #define MST_STATUS_NA (1 << 4) 1374 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1375 MST_STATUS_ND) 1376 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1377 MST_STATUS_AL | \ 1378 MST_STATUS_IP | \ 1379 MST_STATUS_TSS) 1380 #define MST_TX_BYTES_XFRD 0x50 1381 #define MST_RX_BYTES_XFRD 0x54 1382 #define SCL_HIGH_PERIOD 0x80 1383 #define SCL_LOW_PERIOD 0x84 1384 #define SPIKE_FLTR_LEN 0x88 1385 #define SDA_SETUP_TIME 0x8c 1386 #define SDA_HOLD_TIME 0x90 1387 /* LDV_COMMENT_END_PREP */ 1388 /* LDV_COMMENT_FUNCTION_CALL Function from field "get_sda" from driver structure with callbacks "axxia_i2c_recovery_info" */ 1389 ldv_handler_precall(); 1390 axxia_i2c_get_sda( var_group1); 1391 1392 1393 1394 1395 } 1396 1397 break; 1398 case 3: { 1399 1400 /** STRUCT: struct type: i2c_algorithm, struct name: axxia_i2c_algo **/ 1401 1402 1403 /* content: static u32 axxia_i2c_func(struct i2c_adapter *adap)*/ 1404 /* LDV_COMMENT_BEGIN_PREP */ 1405 #define SCL_WAIT_TIMEOUT_NS 25000000 1406 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1407 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1408 #define FIFO_SIZE 8 1409 #define GLOBAL_CONTROL 0x00 1410 #define GLOBAL_MST_EN BIT(0) 1411 #define GLOBAL_SLV_EN BIT(1) 1412 #define GLOBAL_IBML_EN BIT(2) 1413 #define INTERRUPT_STATUS 0x04 1414 #define INTERRUPT_ENABLE 0x08 1415 #define INT_SLV BIT(1) 1416 #define INT_MST BIT(0) 1417 #define WAIT_TIMER_CONTROL 0x0c 1418 #define WT_EN BIT(15) 1419 #define WT_VALUE(_x) ((_x) & 0x7fff) 1420 #define IBML_TIMEOUT 0x10 1421 #define IBML_LOW_MEXT 0x14 1422 #define IBML_LOW_SEXT 0x18 1423 #define TIMER_CLOCK_DIV 0x1c 1424 #define I2C_BUS_MONITOR 0x20 1425 #define BM_SDAC BIT(3) 1426 #define BM_SCLC BIT(2) 1427 #define BM_SDAS BIT(1) 1428 #define BM_SCLS BIT(0) 1429 #define SOFT_RESET 0x24 1430 #define MST_COMMAND 0x28 1431 #define CMD_BUSY (1<<3) 1432 #define CMD_MANUAL (0x00 | CMD_BUSY) 1433 #define CMD_AUTO (0x01 | CMD_BUSY) 1434 #define MST_RX_XFER 0x2c 1435 #define MST_TX_XFER 0x30 1436 #define MST_ADDR_1 0x34 1437 #define MST_ADDR_2 0x38 1438 #define MST_DATA 0x3c 1439 #define MST_TX_FIFO 0x40 1440 #define MST_RX_FIFO 0x44 1441 #define MST_INT_ENABLE 0x48 1442 #define MST_INT_STATUS 0x4c 1443 #define MST_STATUS_RFL (1 << 13) 1444 #define MST_STATUS_TFL (1 << 12) 1445 #define MST_STATUS_SNS (1 << 11) 1446 #define MST_STATUS_SS (1 << 10) 1447 #define MST_STATUS_SCC (1 << 9) 1448 #define MST_STATUS_IP (1 << 8) 1449 #define MST_STATUS_TSS (1 << 7) 1450 #define MST_STATUS_AL (1 << 6) 1451 #define MST_STATUS_ND (1 << 5) 1452 #define MST_STATUS_NA (1 << 4) 1453 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1454 MST_STATUS_ND) 1455 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1456 MST_STATUS_AL | \ 1457 MST_STATUS_IP | \ 1458 MST_STATUS_TSS) 1459 #define MST_TX_BYTES_XFRD 0x50 1460 #define MST_RX_BYTES_XFRD 0x54 1461 #define SCL_HIGH_PERIOD 0x80 1462 #define SCL_LOW_PERIOD 0x84 1463 #define SPIKE_FLTR_LEN 0x88 1464 #define SDA_SETUP_TIME 0x8c 1465 #define SDA_HOLD_TIME 0x90 1466 /* LDV_COMMENT_END_PREP */ 1467 /* LDV_COMMENT_FUNCTION_CALL Function from field "functionality" from driver structure with callbacks "axxia_i2c_algo" */ 1468 ldv_handler_precall(); 1469 axxia_i2c_func( var_group1); 1470 1471 1472 1473 1474 } 1475 1476 break; 1477 case 4: { 1478 1479 /** STRUCT: struct type: platform_driver, struct name: axxia_i2c_driver **/ 1480 if(ldv_s_axxia_i2c_driver_platform_driver==0) { 1481 1482 /* content: static int 
axxia_i2c_probe(struct platform_device *pdev)*/ 1483 /* LDV_COMMENT_BEGIN_PREP */ 1484 #define SCL_WAIT_TIMEOUT_NS 25000000 1485 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1486 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1487 #define FIFO_SIZE 8 1488 #define GLOBAL_CONTROL 0x00 1489 #define GLOBAL_MST_EN BIT(0) 1490 #define GLOBAL_SLV_EN BIT(1) 1491 #define GLOBAL_IBML_EN BIT(2) 1492 #define INTERRUPT_STATUS 0x04 1493 #define INTERRUPT_ENABLE 0x08 1494 #define INT_SLV BIT(1) 1495 #define INT_MST BIT(0) 1496 #define WAIT_TIMER_CONTROL 0x0c 1497 #define WT_EN BIT(15) 1498 #define WT_VALUE(_x) ((_x) & 0x7fff) 1499 #define IBML_TIMEOUT 0x10 1500 #define IBML_LOW_MEXT 0x14 1501 #define IBML_LOW_SEXT 0x18 1502 #define TIMER_CLOCK_DIV 0x1c 1503 #define I2C_BUS_MONITOR 0x20 1504 #define BM_SDAC BIT(3) 1505 #define BM_SCLC BIT(2) 1506 #define BM_SDAS BIT(1) 1507 #define BM_SCLS BIT(0) 1508 #define SOFT_RESET 0x24 1509 #define MST_COMMAND 0x28 1510 #define CMD_BUSY (1<<3) 1511 #define CMD_MANUAL (0x00 | CMD_BUSY) 1512 #define CMD_AUTO (0x01 | CMD_BUSY) 1513 #define MST_RX_XFER 0x2c 1514 #define MST_TX_XFER 0x30 1515 #define MST_ADDR_1 0x34 1516 #define MST_ADDR_2 0x38 1517 #define MST_DATA 0x3c 1518 #define MST_TX_FIFO 0x40 1519 #define MST_RX_FIFO 0x44 1520 #define MST_INT_ENABLE 0x48 1521 #define MST_INT_STATUS 0x4c 1522 #define MST_STATUS_RFL (1 << 13) 1523 #define MST_STATUS_TFL (1 << 12) 1524 #define MST_STATUS_SNS (1 << 11) 1525 #define MST_STATUS_SS (1 << 10) 1526 #define MST_STATUS_SCC (1 << 9) 1527 #define MST_STATUS_IP (1 << 8) 1528 #define MST_STATUS_TSS (1 << 7) 1529 #define MST_STATUS_AL (1 << 6) 1530 #define MST_STATUS_ND (1 << 5) 1531 #define MST_STATUS_NA (1 << 4) 1532 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1533 MST_STATUS_ND) 1534 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1535 MST_STATUS_AL | \ 1536 MST_STATUS_IP | \ 1537 MST_STATUS_TSS) 1538 #define MST_TX_BYTES_XFRD 0x50 1539 #define MST_RX_BYTES_XFRD 0x54 1540 #define SCL_HIGH_PERIOD 0x80 1541 #define SCL_LOW_PERIOD 0x84 1542 #define SPIKE_FLTR_LEN 0x88 1543 #define SDA_SETUP_TIME 0x8c 1544 #define SDA_HOLD_TIME 0x90 1545 /* LDV_COMMENT_END_PREP */ 1546 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "axxia_i2c_driver". Standart function test for correct return result. 
*/ 1547 res_axxia_i2c_probe_16 = axxia_i2c_probe( var_group2); 1548 ldv_check_return_value(res_axxia_i2c_probe_16); 1549 ldv_check_return_value_probe(res_axxia_i2c_probe_16); 1550 if(res_axxia_i2c_probe_16) 1551 goto ldv_module_exit; 1552 ldv_s_axxia_i2c_driver_platform_driver++; 1553 1554 } 1555 1556 } 1557 1558 break; 1559 case 5: { 1560 1561 /** STRUCT: struct type: platform_driver, struct name: axxia_i2c_driver **/ 1562 if(ldv_s_axxia_i2c_driver_platform_driver==1) { 1563 1564 /* content: static int axxia_i2c_remove(struct platform_device *pdev)*/ 1565 /* LDV_COMMENT_BEGIN_PREP */ 1566 #define SCL_WAIT_TIMEOUT_NS 25000000 1567 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1568 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1569 #define FIFO_SIZE 8 1570 #define GLOBAL_CONTROL 0x00 1571 #define GLOBAL_MST_EN BIT(0) 1572 #define GLOBAL_SLV_EN BIT(1) 1573 #define GLOBAL_IBML_EN BIT(2) 1574 #define INTERRUPT_STATUS 0x04 1575 #define INTERRUPT_ENABLE 0x08 1576 #define INT_SLV BIT(1) 1577 #define INT_MST BIT(0) 1578 #define WAIT_TIMER_CONTROL 0x0c 1579 #define WT_EN BIT(15) 1580 #define WT_VALUE(_x) ((_x) & 0x7fff) 1581 #define IBML_TIMEOUT 0x10 1582 #define IBML_LOW_MEXT 0x14 1583 #define IBML_LOW_SEXT 0x18 1584 #define TIMER_CLOCK_DIV 0x1c 1585 #define I2C_BUS_MONITOR 0x20 1586 #define BM_SDAC BIT(3) 1587 #define BM_SCLC BIT(2) 1588 #define BM_SDAS BIT(1) 1589 #define BM_SCLS BIT(0) 1590 #define SOFT_RESET 0x24 1591 #define MST_COMMAND 0x28 1592 #define CMD_BUSY (1<<3) 1593 #define CMD_MANUAL (0x00 | CMD_BUSY) 1594 #define CMD_AUTO (0x01 | CMD_BUSY) 1595 #define MST_RX_XFER 0x2c 1596 #define MST_TX_XFER 0x30 1597 #define MST_ADDR_1 0x34 1598 #define MST_ADDR_2 0x38 1599 #define MST_DATA 0x3c 1600 #define MST_TX_FIFO 0x40 1601 #define MST_RX_FIFO 0x44 1602 #define MST_INT_ENABLE 0x48 1603 #define MST_INT_STATUS 0x4c 1604 #define MST_STATUS_RFL (1 << 13) 1605 #define MST_STATUS_TFL (1 << 12) 1606 #define MST_STATUS_SNS (1 << 11) 1607 #define MST_STATUS_SS (1 << 10) 1608 #define MST_STATUS_SCC (1 << 9) 1609 #define MST_STATUS_IP (1 << 8) 1610 #define MST_STATUS_TSS (1 << 7) 1611 #define MST_STATUS_AL (1 << 6) 1612 #define MST_STATUS_ND (1 << 5) 1613 #define MST_STATUS_NA (1 << 4) 1614 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1615 MST_STATUS_ND) 1616 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1617 MST_STATUS_AL | \ 1618 MST_STATUS_IP | \ 1619 MST_STATUS_TSS) 1620 #define MST_TX_BYTES_XFRD 0x50 1621 #define MST_RX_BYTES_XFRD 0x54 1622 #define SCL_HIGH_PERIOD 0x80 1623 #define SCL_LOW_PERIOD 0x84 1624 #define SPIKE_FLTR_LEN 0x88 1625 #define SDA_SETUP_TIME 0x8c 1626 #define SDA_HOLD_TIME 0x90 1627 /* LDV_COMMENT_END_PREP */ 1628 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "axxia_i2c_driver" */ 1629 ldv_handler_precall(); 1630 axxia_i2c_remove( var_group2); 1631 ldv_s_axxia_i2c_driver_platform_driver=0; 1632 1633 } 1634 1635 } 1636 1637 break; 1638 case 6: { 1639 1640 /** CALLBACK SECTION request_irq **/ 1641 LDV_IN_INTERRUPT=2; 1642 1643 /* content: static irqreturn_t axxia_i2c_isr(int irq, void *_dev)*/ 1644 /* LDV_COMMENT_BEGIN_PREP */ 1645 #define SCL_WAIT_TIMEOUT_NS 25000000 1646 #define I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) 1647 #define I2C_STOP_TIMEOUT (msecs_to_jiffies(100)) 1648 #define FIFO_SIZE 8 1649 #define GLOBAL_CONTROL 0x00 1650 #define GLOBAL_MST_EN BIT(0) 1651 #define GLOBAL_SLV_EN BIT(1) 1652 #define GLOBAL_IBML_EN BIT(2) 1653 #define INTERRUPT_STATUS 0x04 1654 #define INTERRUPT_ENABLE 0x08 1655 #define INT_SLV BIT(1) 
1656 #define INT_MST BIT(0) 1657 #define WAIT_TIMER_CONTROL 0x0c 1658 #define WT_EN BIT(15) 1659 #define WT_VALUE(_x) ((_x) & 0x7fff) 1660 #define IBML_TIMEOUT 0x10 1661 #define IBML_LOW_MEXT 0x14 1662 #define IBML_LOW_SEXT 0x18 1663 #define TIMER_CLOCK_DIV 0x1c 1664 #define I2C_BUS_MONITOR 0x20 1665 #define BM_SDAC BIT(3) 1666 #define BM_SCLC BIT(2) 1667 #define BM_SDAS BIT(1) 1668 #define BM_SCLS BIT(0) 1669 #define SOFT_RESET 0x24 1670 #define MST_COMMAND 0x28 1671 #define CMD_BUSY (1<<3) 1672 #define CMD_MANUAL (0x00 | CMD_BUSY) 1673 #define CMD_AUTO (0x01 | CMD_BUSY) 1674 #define MST_RX_XFER 0x2c 1675 #define MST_TX_XFER 0x30 1676 #define MST_ADDR_1 0x34 1677 #define MST_ADDR_2 0x38 1678 #define MST_DATA 0x3c 1679 #define MST_TX_FIFO 0x40 1680 #define MST_RX_FIFO 0x44 1681 #define MST_INT_ENABLE 0x48 1682 #define MST_INT_STATUS 0x4c 1683 #define MST_STATUS_RFL (1 << 13) 1684 #define MST_STATUS_TFL (1 << 12) 1685 #define MST_STATUS_SNS (1 << 11) 1686 #define MST_STATUS_SS (1 << 10) 1687 #define MST_STATUS_SCC (1 << 9) 1688 #define MST_STATUS_IP (1 << 8) 1689 #define MST_STATUS_TSS (1 << 7) 1690 #define MST_STATUS_AL (1 << 6) 1691 #define MST_STATUS_ND (1 << 5) 1692 #define MST_STATUS_NA (1 << 4) 1693 #define MST_STATUS_NAK (MST_STATUS_NA | \ 1694 MST_STATUS_ND) 1695 #define MST_STATUS_ERR (MST_STATUS_NAK | \ 1696 MST_STATUS_AL | \ 1697 MST_STATUS_IP | \ 1698 MST_STATUS_TSS) 1699 #define MST_TX_BYTES_XFRD 0x50 1700 #define MST_RX_BYTES_XFRD 0x54 1701 #define SCL_HIGH_PERIOD 0x80 1702 #define SCL_LOW_PERIOD 0x84 1703 #define SPIKE_FLTR_LEN 0x88 1704 #define SDA_SETUP_TIME 0x8c 1705 #define SDA_HOLD_TIME 0x90 1706 /* LDV_COMMENT_END_PREP */ 1707 /* LDV_COMMENT_FUNCTION_CALL */ 1708 ldv_handler_precall(); 1709 axxia_i2c_isr( var_axxia_i2c_isr_9_p0, var_axxia_i2c_isr_9_p1); 1710 LDV_IN_INTERRUPT=1; 1711 1712 1713 1714 } 1715 1716 break; 1717 default: break; 1718 1719 } 1720 1721 } 1722 1723 ldv_module_exit: 1724 1725 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */ 1726 ldv_final: ldv_check_final_state(); 1727 1728 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */ 1729 return; 1730 1731 } 1732 #endif 1733 1734 /* LDV_COMMENT_END_MAIN */ 1735 1736 #line 15 "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--320_7a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/11495/dscv_tempdir/dscv/ri/320_7a/drivers/i2c/busses/i2c-axxia.o.c.prepared"
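Taken together, the generated main above reduces to a small pattern: driver callbacks are invoked in a nondeterministically chosen order, probe and remove are serialized through the ldv_s_axxia_i2c_driver_platform_driver counter, and every exit path ends in ldv_check_final_state(). The following is only a hedged, greatly simplified sketch of that pattern; the ldv_example_* name is hypothetical and this is not the generated code itself.

/* Hypothetical sketch of the environment-model loop above (simplified). */
static void ldv_example_env_loop(struct platform_device *pdev)
{
	int bound = 0;	/* plays the role of ldv_s_axxia_i2c_driver_platform_driver */

	while (nondet_int() || bound) {
		switch (nondet_int()) {
		case 0:			/* probe() only while the device is unbound */
			if (!bound && axxia_i2c_probe(pdev) == 0)
				bound = 1;
			break;
		case 1:			/* remove() only after a successful probe() */
			if (bound) {
				ldv_handler_precall();
				axxia_i2c_remove(pdev);
				bound = 0;
			}
			break;
		default:
			break;
		}
	}
	/* Every exit path must leave all clocks released. */
	ldv_check_final_state();
}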
1
2 #include <verifier/rcv.h>
3 #include <kernel-model/ERR.inc>
4
5 struct clk;
6
7
8 /* LDV_COMMENT_CHANGE_STATE Initialize counter to zero. */
9 int ldv_counter_clk = 0;
10
11 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_clk_disable_clk') Release. */
12 void ldv_clk_disable_clk(struct clk *clk)
13 {
14 /* LDV_COMMENT_CHANGE_STATE Reset counter to zero (clock released). */
15 ldv_counter_clk = 0;
16 }
17
18 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_clk_enable_clk') Acquire. */
19 int ldv_clk_enable_clk(void)
20 {
21 int retval = ldv_undef_int();
22 if (!retval)
23 {
24 /* LDV_COMMENT_CHANGE_STATE Set counter to one (clock enabled). */
25 ldv_counter_clk = 1;
26 }
27 return retval;
28 }
29
30
31 /* LDV_COMMENT_CHANGE_STATE Initialize counter to zero. */
32 int ldv_counter_i2c_clk_of_axxia_i2c_dev = 0;
33
34 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_clk_disable_i2c_clk_of_axxia_i2c_dev') Release. */
35 void ldv_clk_disable_i2c_clk_of_axxia_i2c_dev(struct clk *clk)
36 {
37 /* LDV_COMMENT_CHANGE_STATE Reset counter to zero (clock released). */
38 ldv_counter_i2c_clk_of_axxia_i2c_dev = 0;
39 }
40
41 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_clk_enable_i2c_clk_of_axxia_i2c_dev') Acquire. */
42 int ldv_clk_enable_i2c_clk_of_axxia_i2c_dev(void)
43 {
44 int retval = ldv_undef_int();
45 if (!retval)
46 {
47 /* LDV_COMMENT_CHANGE_STATE Set counter to one (clock enabled). */
48 ldv_counter_i2c_clk_of_axxia_i2c_dev = 1;
49 }
50 return retval;
51 }
52
53
54 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all clocks are disabled at the end. */
55 void ldv_check_final_state(void)
56 {
57 /* LDV_COMMENT_ASSERT Clock 'clk' must be disabled at the end */
58 ldv_assert(ldv_counter_clk == 0);
59 /* LDV_COMMENT_ASSERT Clock 'i2c_clk_of_axxia_i2c_dev' must be disabled at the end */
60 ldv_assert(ldv_counter_i2c_clk_of_axxia_i2c_dev == 0);
61 }
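The two counters above encode a simple resource discipline: every successful ldv_clk_enable_*() must be matched by an ldv_clk_disable_*() before the module exits, otherwise ldv_check_final_state() reaches the error label. In the verification run the driver's clock calls are assumed to be mapped onto these model functions; the sketch below is illustrative only and shows one balanced path that satisfies the final check.

/* Illustrative only: a balanced enable/disable sequence. The cast of 0 stands
 * in for whatever struct clk pointer the driver actually holds. */
void balanced_clk_example(void)
{
    if (ldv_clk_enable_clk() == 0) {          /* counter becomes 1 on success */
        /* ... clocked work happens here ... */
        ldv_clk_disable_clk((struct clk *)0); /* counter back to 0            */
    }
    ldv_check_final_state();                  /* both counters are 0: passes  */
}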
1 #ifndef _LDV_ERR_
2 #define _LDV_ERR_
3
4 #include <linux/kernel.h>
5
6 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_is_err') This function returns the result of checking whether the pointer is an error pointer. */
7 bool ldv_is_err(const void *ptr)
8 {
9 /*LDV_COMMENT_RETURN Return the result of comparing the pointer against LDV_PTR_MAX.*/
10 return ((unsigned long)ptr > LDV_PTR_MAX);
11 }
12
13 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_err_ptr') This function encodes an error code as a pointer. */
14 void* ldv_err_ptr(long error)
15 {
16 /*LDV_COMMENT_RETURN Return the error pointer.*/
17 return (void *)(LDV_PTR_MAX - error);
18 }
19
20 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_ptr_err') This function returns the error code encoded in an error pointer. */
21 long ldv_ptr_err(const void *ptr)
22 {
23 /*LDV_COMMENT_RETURN Return the error code.*/
24 return (long)(LDV_PTR_MAX - (unsigned long)ptr);
25 }
26
27 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_is_err_or_null') This function checks whether the pointer is an error pointer or NULL. */
28 bool ldv_is_err_or_null(const void *ptr)
29 {
30 /*LDV_COMMENT_RETURN Return 1 if the pointer is NULL or an error pointer, and 0 otherwise.*/
31 return !ptr || ldv_is_err(ptr);
32 }
33
34 #endif /* _LDV_ERR_ */
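These helpers mirror the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() convention with a small LDV_PTR_MAX threshold, so encoding an error code and decoding it again is a round trip. The fragment below sketches that property; it is illustrative only, and -12 is just an example error code.

void err_ptr_roundtrip_example(void)
{
    long err = -12;                     /* e.g. -ENOMEM, for illustration   */
    void *p = ldv_err_ptr(err);         /* encode: LDV_PTR_MAX - err        */

    if (ldv_is_err(p)) {                /* above the threshold => "error"   */
        long decoded = ldv_ptr_err(p);  /* decode: equals err again         */
        (void)decoded;
    }
}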
1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes the program to reach the error
5 label, like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? (void)0 : ldv_error())
7
8 /* The error label wrapper. It is used because some static verifiers (like
9 BLAST) do not accept multiple error labels throughout a program. */
10 static inline void ldv_error(void)
11 {
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? (void)0 : ldv_stop())
18
19 /* Infinite loop that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21 LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return a nondeterministic negative integer number. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 int ret = ldv_undef_int();
33
34 ldv_assume(ret < 0);
35
36 return ret;
37 }
38 /* Return a nondeterministic nonpositive integer number. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 int ret = ldv_undef_int();
42
43 ldv_assume(ret <= 0);
44
45 return ret;
46 }
47
48 /* Add an explicit model for the __builtin_expect GCC built-in. Without the model the
49 return value would be treated as nondeterministic by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62 ldv_assert(0);
63 }
64
65 /* The constant simulates an error value returned by ldv_undef_ptr(). */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */
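Together with ldv_assume(), the ldv_undef_*() declarations above are the building blocks for modelling inputs: pick an arbitrary value, then prune the paths that violate a constraint. A small sketch follows (illustrative only; the bound of 8 merely echoes FIFO_SIZE from the driver code above).

static int pick_fifo_index(void)
{
    int v = ldv_undef_int();   /* completely unconstrained value          */

    ldv_assume(v >= 0);        /* verifiers drop paths with v < 0         */
    ldv_assume(v < 8);         /* ...and paths outside the FIFO range     */

    return v;                  /* every remaining path has 0 <= v < 8     */
}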
1 #ifndef __LINUX_COMPLETION_H 2 #define __LINUX_COMPLETION_H 3 4 /* 5 * (C) Copyright 2001 Linus Torvalds 6 * 7 * Atomic wait-for-completion handler data structures. 8 * See kernel/sched/completion.c for details. 9 */ 10 11 #include <linux/wait.h> 12 13 /* 14 * struct completion - structure used to maintain state for a "completion" 15 * 16 * This is the opaque structure used to maintain the state for a "completion". 17 * Completions currently use a FIFO to queue threads that have to wait for 18 * the "completion" event. 19 * 20 * See also: complete(), wait_for_completion() (and friends _timeout, 21 * _interruptible, _interruptible_timeout, and _killable), init_completion(), 22 * reinit_completion(), and macros DECLARE_COMPLETION(), 23 * DECLARE_COMPLETION_ONSTACK(). 24 */ 25 struct completion { 26 unsigned int done; 27 wait_queue_head_t wait; 28 }; 29 30 #define COMPLETION_INITIALIZER(work) \ 31 { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } 32 33 #define COMPLETION_INITIALIZER_ONSTACK(work) \ 34 ({ init_completion(&work); work; }) 35 36 /** 37 * DECLARE_COMPLETION - declare and initialize a completion structure 38 * @work: identifier for the completion structure 39 * 40 * This macro declares and initializes a completion structure. Generally used 41 * for static declarations. You should use the _ONSTACK variant for automatic 42 * variables. 43 */ 44 #define DECLARE_COMPLETION(work) \ 45 struct completion work = COMPLETION_INITIALIZER(work) 46 47 /* 48 * Lockdep needs to run a non-constant initializer for on-stack 49 * completions - so we use the _ONSTACK() variant for those that 50 * are on the kernel stack: 51 */ 52 /** 53 * DECLARE_COMPLETION_ONSTACK - declare and initialize a completion structure 54 * @work: identifier for the completion structure 55 * 56 * This macro declares and initializes a completion structure on the kernel 57 * stack. 58 */ 59 #ifdef CONFIG_LOCKDEP 60 # define DECLARE_COMPLETION_ONSTACK(work) \ 61 struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) 62 #else 63 # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) 64 #endif 65 66 /** 67 * init_completion - Initialize a dynamically allocated completion 68 * @x: pointer to completion structure that is to be initialized 69 * 70 * This inline function will initialize a dynamically created completion 71 * structure. 72 */ 73 static inline void init_completion(struct completion *x) 74 { 75 x->done = 0; 76 init_waitqueue_head(&x->wait); 77 } 78 79 /** 80 * reinit_completion - reinitialize a completion structure 81 * @x: pointer to completion structure that is to be reinitialized 82 * 83 * This inline function should be used to reinitialize a completion structure so it can 84 * be reused. This is especially important after complete_all() is used. 
85 */ 86 static inline void reinit_completion(struct completion *x) 87 { 88 x->done = 0; 89 } 90 91 extern void wait_for_completion(struct completion *); 92 extern void wait_for_completion_io(struct completion *); 93 extern int wait_for_completion_interruptible(struct completion *x); 94 extern int wait_for_completion_killable(struct completion *x); 95 extern unsigned long wait_for_completion_timeout(struct completion *x, 96 unsigned long timeout); 97 extern unsigned long wait_for_completion_io_timeout(struct completion *x, 98 unsigned long timeout); 99 extern long wait_for_completion_interruptible_timeout( 100 struct completion *x, unsigned long timeout); 101 extern long wait_for_completion_killable_timeout( 102 struct completion *x, unsigned long timeout); 103 extern bool try_wait_for_completion(struct completion *x); 104 extern bool completion_done(struct completion *x); 105 106 extern void complete(struct completion *); 107 extern void complete_all(struct completion *); 108 109 #endif
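For context, the completion API declared above is the usual way a bus driver waits for an interrupt-driven transfer to finish: the request path (re)initializes the completion, the ISR signals it with complete(), and the caller blocks with a timeout. The sketch below is illustrative only; struct xfer_ctx and the function names are placeholders, not part of the driver in this trace, and it assumes init_completion() was called once at setup.

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

struct xfer_ctx {
    struct completion done;
};

static void xfer_start(struct xfer_ctx *ctx)
{
    reinit_completion(&ctx->done);     /* fresh state for each transfer   */
    /* ... program the hardware and enable its interrupt ... */
}

static irqreturn_t xfer_irq(int irq, void *data)
{
    struct xfer_ctx *ctx = data;

    /* ... read and acknowledge the hardware status ... */
    complete(&ctx->done);              /* wake up the waiting thread      */
    return IRQ_HANDLED;
}

static int xfer_wait(struct xfer_ctx *ctx, unsigned long timeout)
{
    unsigned long left = wait_for_completion_timeout(&ctx->done, timeout);

    return left ? 0 : -ETIMEDOUT;      /* 0 jiffies remaining == timeout  */
}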
1 /* 2 * device.h - generic, centralized driver model 3 * 4 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> 5 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de> 6 * Copyright (c) 2008-2009 Novell Inc. 7 * 8 * This file is released under the GPLv2 9 * 10 * See Documentation/driver-model/ for more information. 11 */ 12 13 #ifndef _DEVICE_H_ 14 #define _DEVICE_H_ 15 16 #include <linux/ioport.h> 17 #include <linux/kobject.h> 18 #include <linux/klist.h> 19 #include <linux/list.h> 20 #include <linux/lockdep.h> 21 #include <linux/compiler.h> 22 #include <linux/types.h> 23 #include <linux/mutex.h> 24 #include <linux/pinctrl/devinfo.h> 25 #include <linux/pm.h> 26 #include <linux/atomic.h> 27 #include <linux/ratelimit.h> 28 #include <linux/uidgid.h> 29 #include <linux/gfp.h> 30 #include <asm/device.h> 31 32 struct device; 33 struct device_private; 34 struct device_driver; 35 struct driver_private; 36 struct module; 37 struct class; 38 struct subsys_private; 39 struct bus_type; 40 struct device_node; 41 struct fwnode_handle; 42 struct iommu_ops; 43 struct iommu_group; 44 45 struct bus_attribute { 46 struct attribute attr; 47 ssize_t (*show)(struct bus_type *bus, char *buf); 48 ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); 49 }; 50 51 #define BUS_ATTR(_name, _mode, _show, _store) \ 52 struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) 53 #define BUS_ATTR_RW(_name) \ 54 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) 55 #define BUS_ATTR_RO(_name) \ 56 struct bus_attribute bus_attr_##_name = __ATTR_RO(_name) 57 58 extern int __must_check bus_create_file(struct bus_type *, 59 struct bus_attribute *); 60 extern void bus_remove_file(struct bus_type *, struct bus_attribute *); 61 62 /** 63 * struct bus_type - The bus type of the device 64 * 65 * @name: The name of the bus. 66 * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id). 67 * @dev_root: Default device to use as the parent. 68 * @dev_attrs: Default attributes of the devices on the bus. 69 * @bus_groups: Default attributes of the bus. 70 * @dev_groups: Default attributes of the devices on the bus. 71 * @drv_groups: Default attributes of the device drivers on the bus. 72 * @match: Called, perhaps multiple times, whenever a new device or driver 73 * is added for this bus. It should return a positive value if the 74 * given device can be handled by the given driver and zero 75 * otherwise. It may also return error code if determining that 76 * the driver supports the device is not possible. In case of 77 * -EPROBE_DEFER it will queue the device for deferred probing. 78 * @uevent: Called when a device is added, removed, or a few other things 79 * that generate uevents to add the environment variables. 80 * @probe: Called when a new device or driver add to this bus, and callback 81 * the specific driver's probe to initial the matched device. 82 * @remove: Called when a device removed from this bus. 83 * @shutdown: Called at shut-down time to quiesce the device. 84 * 85 * @online: Called to put the device back online (after offlining it). 86 * @offline: Called to put the device offline for hot-removal. May fail. 87 * 88 * @suspend: Called when a device on this bus wants to go to sleep mode. 89 * @resume: Called to bring a device on this bus out of sleep mode. 90 * @pm: Power management operations of this bus, callback the specific 91 * device driver's pm-ops. 
92 * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU 93 * driver implementations to a bus and allow the driver to do 94 * bus-specific setup 95 * @p: The private data of the driver core, only the driver core can 96 * touch this. 97 * @lock_key: Lock class key for use by the lock validator 98 * 99 * A bus is a channel between the processor and one or more devices. For the 100 * purposes of the device model, all devices are connected via a bus, even if 101 * it is an internal, virtual, "platform" bus. Buses can plug into each other. 102 * A USB controller is usually a PCI device, for example. The device model 103 * represents the actual connections between buses and the devices they control. 104 * A bus is represented by the bus_type structure. It contains the name, the 105 * default attributes, the bus' methods, PM operations, and the driver core's 106 * private data. 107 */ 108 struct bus_type { 109 const char *name; 110 const char *dev_name; 111 struct device *dev_root; 112 struct device_attribute *dev_attrs; /* use dev_groups instead */ 113 const struct attribute_group **bus_groups; 114 const struct attribute_group **dev_groups; 115 const struct attribute_group **drv_groups; 116 117 int (*match)(struct device *dev, struct device_driver *drv); 118 int (*uevent)(struct device *dev, struct kobj_uevent_env *env); 119 int (*probe)(struct device *dev); 120 int (*remove)(struct device *dev); 121 void (*shutdown)(struct device *dev); 122 123 int (*online)(struct device *dev); 124 int (*offline)(struct device *dev); 125 126 int (*suspend)(struct device *dev, pm_message_t state); 127 int (*resume)(struct device *dev); 128 129 const struct dev_pm_ops *pm; 130 131 const struct iommu_ops *iommu_ops; 132 133 struct subsys_private *p; 134 struct lock_class_key lock_key; 135 }; 136 137 extern int __must_check bus_register(struct bus_type *bus); 138 139 extern void bus_unregister(struct bus_type *bus); 140 141 extern int __must_check bus_rescan_devices(struct bus_type *bus); 142 143 /* iterator helpers for buses */ 144 struct subsys_dev_iter { 145 struct klist_iter ki; 146 const struct device_type *type; 147 }; 148 void subsys_dev_iter_init(struct subsys_dev_iter *iter, 149 struct bus_type *subsys, 150 struct device *start, 151 const struct device_type *type); 152 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); 153 void subsys_dev_iter_exit(struct subsys_dev_iter *iter); 154 155 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, 156 int (*fn)(struct device *dev, void *data)); 157 struct device *bus_find_device(struct bus_type *bus, struct device *start, 158 void *data, 159 int (*match)(struct device *dev, void *data)); 160 struct device *bus_find_device_by_name(struct bus_type *bus, 161 struct device *start, 162 const char *name); 163 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, 164 struct device *hint); 165 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, 166 void *data, int (*fn)(struct device_driver *, void *)); 167 void bus_sort_breadthfirst(struct bus_type *bus, 168 int (*compare)(const struct device *a, 169 const struct device *b)); 170 /* 171 * Bus notifiers: Get notified of addition/removal of devices 172 * and binding/unbinding of drivers to devices. 173 * In the long run, it should be a replacement for the platform 174 * notify hooks. 
175 */ 176 struct notifier_block; 177 178 extern int bus_register_notifier(struct bus_type *bus, 179 struct notifier_block *nb); 180 extern int bus_unregister_notifier(struct bus_type *bus, 181 struct notifier_block *nb); 182 183 /* All 4 notifers below get called with the target struct device * 184 * as an argument. Note that those functions are likely to be called 185 * with the device lock held in the core, so be careful. 186 */ 187 #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ 188 #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */ 189 #define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */ 190 #define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be 191 bound */ 192 #define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */ 193 #define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be 194 unbound */ 195 #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound 196 from the device */ 197 #define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */ 198 199 extern struct kset *bus_get_kset(struct bus_type *bus); 200 extern struct klist *bus_get_device_klist(struct bus_type *bus); 201 202 /** 203 * enum probe_type - device driver probe type to try 204 * Device drivers may opt in for special handling of their 205 * respective probe routines. This tells the core what to 206 * expect and prefer. 207 * 208 * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well 209 * whether probed synchronously or asynchronously. 210 * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices which 211 * probing order is not essential for booting the system may 212 * opt into executing their probes asynchronously. 213 * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need 214 * their probe routines to run synchronously with driver and 215 * device registration (with the exception of -EPROBE_DEFER 216 * handling - re-probing always ends up being done asynchronously). 217 * 218 * Note that the end goal is to switch the kernel to use asynchronous 219 * probing by default, so annotating drivers with 220 * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us 221 * to speed up boot process while we are validating the rest of the 222 * drivers. 223 */ 224 enum probe_type { 225 PROBE_DEFAULT_STRATEGY, 226 PROBE_PREFER_ASYNCHRONOUS, 227 PROBE_FORCE_SYNCHRONOUS, 228 }; 229 230 /** 231 * struct device_driver - The basic device driver structure 232 * @name: Name of the device driver. 233 * @bus: The bus which the device of this driver belongs to. 234 * @owner: The module owner. 235 * @mod_name: Used for built-in modules. 236 * @suppress_bind_attrs: Disables bind/unbind via sysfs. 237 * @probe_type: Type of the probe (synchronous or asynchronous) to use. 238 * @of_match_table: The open firmware table. 239 * @acpi_match_table: The ACPI match table. 240 * @probe: Called to query the existence of a specific device, 241 * whether this driver can work with it, and bind the driver 242 * to a specific device. 243 * @remove: Called when the device is removed from the system to 244 * unbind a device from this driver. 245 * @shutdown: Called at shut-down time to quiesce the device. 246 * @suspend: Called to put the device to sleep mode. Usually to a 247 * low power state. 248 * @resume: Called to bring a device from sleep mode. 249 * @groups: Default attributes that get created by the driver core 250 * automatically. 251 * @pm: Power management operations of the device which matched 252 * this driver. 
253 * @p: Driver core's private data, no one other than the driver 254 * core can touch this. 255 * 256 * The device driver-model tracks all of the drivers known to the system. 257 * The main reason for this tracking is to enable the driver core to match 258 * up drivers with new devices. Once drivers are known objects within the 259 * system, however, a number of other things become possible. Device drivers 260 * can export information and configuration variables that are independent 261 * of any specific device. 262 */ 263 struct device_driver { 264 const char *name; 265 struct bus_type *bus; 266 267 struct module *owner; 268 const char *mod_name; /* used for built-in modules */ 269 270 bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ 271 enum probe_type probe_type; 272 273 const struct of_device_id *of_match_table; 274 const struct acpi_device_id *acpi_match_table; 275 276 int (*probe) (struct device *dev); 277 int (*remove) (struct device *dev); 278 void (*shutdown) (struct device *dev); 279 int (*suspend) (struct device *dev, pm_message_t state); 280 int (*resume) (struct device *dev); 281 const struct attribute_group **groups; 282 283 const struct dev_pm_ops *pm; 284 285 struct driver_private *p; 286 }; 287 288 289 extern int __must_check driver_register(struct device_driver *drv); 290 extern void driver_unregister(struct device_driver *drv); 291 292 extern struct device_driver *driver_find(const char *name, 293 struct bus_type *bus); 294 extern int driver_probe_done(void); 295 extern void wait_for_device_probe(void); 296 297 298 /* sysfs interface for exporting driver attributes */ 299 300 struct driver_attribute { 301 struct attribute attr; 302 ssize_t (*show)(struct device_driver *driver, char *buf); 303 ssize_t (*store)(struct device_driver *driver, const char *buf, 304 size_t count); 305 }; 306 307 #define DRIVER_ATTR(_name, _mode, _show, _store) \ 308 struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store) 309 #define DRIVER_ATTR_RW(_name) \ 310 struct driver_attribute driver_attr_##_name = __ATTR_RW(_name) 311 #define DRIVER_ATTR_RO(_name) \ 312 struct driver_attribute driver_attr_##_name = __ATTR_RO(_name) 313 #define DRIVER_ATTR_WO(_name) \ 314 struct driver_attribute driver_attr_##_name = __ATTR_WO(_name) 315 316 extern int __must_check driver_create_file(struct device_driver *driver, 317 const struct driver_attribute *attr); 318 extern void driver_remove_file(struct device_driver *driver, 319 const struct driver_attribute *attr); 320 321 extern int __must_check driver_for_each_device(struct device_driver *drv, 322 struct device *start, 323 void *data, 324 int (*fn)(struct device *dev, 325 void *)); 326 struct device *driver_find_device(struct device_driver *drv, 327 struct device *start, void *data, 328 int (*match)(struct device *dev, void *data)); 329 330 /** 331 * struct subsys_interface - interfaces to device functions 332 * @name: name of the device function 333 * @subsys: subsytem of the devices to attach to 334 * @node: the list of functions registered at the subsystem 335 * @add_dev: device hookup to device function handler 336 * @remove_dev: device hookup to device function handler 337 * 338 * Simple interfaces attached to a subsystem. Multiple interfaces can 339 * attach to a subsystem and its devices. Unlike drivers, they do not 340 * exclusively claim or control devices. Interfaces usually represent 341 * a specific functionality of a subsystem/class of devices. 
342 */ 343 struct subsys_interface { 344 const char *name; 345 struct bus_type *subsys; 346 struct list_head node; 347 int (*add_dev)(struct device *dev, struct subsys_interface *sif); 348 void (*remove_dev)(struct device *dev, struct subsys_interface *sif); 349 }; 350 351 int subsys_interface_register(struct subsys_interface *sif); 352 void subsys_interface_unregister(struct subsys_interface *sif); 353 354 int subsys_system_register(struct bus_type *subsys, 355 const struct attribute_group **groups); 356 int subsys_virtual_register(struct bus_type *subsys, 357 const struct attribute_group **groups); 358 359 /** 360 * struct class - device classes 361 * @name: Name of the class. 362 * @owner: The module owner. 363 * @class_attrs: Default attributes of this class. 364 * @dev_groups: Default attributes of the devices that belong to the class. 365 * @dev_kobj: The kobject that represents this class and links it into the hierarchy. 366 * @dev_uevent: Called when a device is added, removed from this class, or a 367 * few other things that generate uevents to add the environment 368 * variables. 369 * @devnode: Callback to provide the devtmpfs. 370 * @class_release: Called to release this class. 371 * @dev_release: Called to release the device. 372 * @suspend: Used to put the device to sleep mode, usually to a low power 373 * state. 374 * @resume: Used to bring the device from the sleep mode. 375 * @ns_type: Callbacks so sysfs can detemine namespaces. 376 * @namespace: Namespace of the device belongs to this class. 377 * @pm: The default device power management operations of this class. 378 * @p: The private data of the driver core, no one other than the 379 * driver core can touch this. 380 * 381 * A class is a higher-level view of a device that abstracts out low-level 382 * implementation details. Drivers may see a SCSI disk or an ATA disk, but, 383 * at the class level, they are all simply disks. Classes allow user space 384 * to work with devices based on what they do, rather than how they are 385 * connected or how they work. 
386 */ 387 struct class { 388 const char *name; 389 struct module *owner; 390 391 struct class_attribute *class_attrs; 392 const struct attribute_group **dev_groups; 393 struct kobject *dev_kobj; 394 395 int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); 396 char *(*devnode)(struct device *dev, umode_t *mode); 397 398 void (*class_release)(struct class *class); 399 void (*dev_release)(struct device *dev); 400 401 int (*suspend)(struct device *dev, pm_message_t state); 402 int (*resume)(struct device *dev); 403 404 const struct kobj_ns_type_operations *ns_type; 405 const void *(*namespace)(struct device *dev); 406 407 const struct dev_pm_ops *pm; 408 409 struct subsys_private *p; 410 }; 411 412 struct class_dev_iter { 413 struct klist_iter ki; 414 const struct device_type *type; 415 }; 416 417 extern struct kobject *sysfs_dev_block_kobj; 418 extern struct kobject *sysfs_dev_char_kobj; 419 extern int __must_check __class_register(struct class *class, 420 struct lock_class_key *key); 421 extern void class_unregister(struct class *class); 422 423 /* This is a #define to keep the compiler from merging different 424 * instances of the __key variable */ 425 #define class_register(class) \ 426 ({ \ 427 static struct lock_class_key __key; \ 428 __class_register(class, &__key); \ 429 }) 430 431 struct class_compat; 432 struct class_compat *class_compat_register(const char *name); 433 void class_compat_unregister(struct class_compat *cls); 434 int class_compat_create_link(struct class_compat *cls, struct device *dev, 435 struct device *device_link); 436 void class_compat_remove_link(struct class_compat *cls, struct device *dev, 437 struct device *device_link); 438 439 extern void class_dev_iter_init(struct class_dev_iter *iter, 440 struct class *class, 441 struct device *start, 442 const struct device_type *type); 443 extern struct device *class_dev_iter_next(struct class_dev_iter *iter); 444 extern void class_dev_iter_exit(struct class_dev_iter *iter); 445 446 extern int class_for_each_device(struct class *class, struct device *start, 447 void *data, 448 int (*fn)(struct device *dev, void *data)); 449 extern struct device *class_find_device(struct class *class, 450 struct device *start, const void *data, 451 int (*match)(struct device *, const void *)); 452 453 struct class_attribute { 454 struct attribute attr; 455 ssize_t (*show)(struct class *class, struct class_attribute *attr, 456 char *buf); 457 ssize_t (*store)(struct class *class, struct class_attribute *attr, 458 const char *buf, size_t count); 459 }; 460 461 #define CLASS_ATTR(_name, _mode, _show, _store) \ 462 struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store) 463 #define CLASS_ATTR_RW(_name) \ 464 struct class_attribute class_attr_##_name = __ATTR_RW(_name) 465 #define CLASS_ATTR_RO(_name) \ 466 struct class_attribute class_attr_##_name = __ATTR_RO(_name) 467 468 extern int __must_check class_create_file_ns(struct class *class, 469 const struct class_attribute *attr, 470 const void *ns); 471 extern void class_remove_file_ns(struct class *class, 472 const struct class_attribute *attr, 473 const void *ns); 474 475 static inline int __must_check class_create_file(struct class *class, 476 const struct class_attribute *attr) 477 { 478 return class_create_file_ns(class, attr, NULL); 479 } 480 481 static inline void class_remove_file(struct class *class, 482 const struct class_attribute *attr) 483 { 484 return class_remove_file_ns(class, attr, NULL); 485 } 486 487 /* Simple class attribute that is 
just a static string */ 488 struct class_attribute_string { 489 struct class_attribute attr; 490 char *str; 491 }; 492 493 /* Currently read-only only */ 494 #define _CLASS_ATTR_STRING(_name, _mode, _str) \ 495 { __ATTR(_name, _mode, show_class_attr_string, NULL), _str } 496 #define CLASS_ATTR_STRING(_name, _mode, _str) \ 497 struct class_attribute_string class_attr_##_name = \ 498 _CLASS_ATTR_STRING(_name, _mode, _str) 499 500 extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr, 501 char *buf); 502 503 struct class_interface { 504 struct list_head node; 505 struct class *class; 506 507 int (*add_dev) (struct device *, struct class_interface *); 508 void (*remove_dev) (struct device *, struct class_interface *); 509 }; 510 511 extern int __must_check class_interface_register(struct class_interface *); 512 extern void class_interface_unregister(struct class_interface *); 513 514 extern struct class * __must_check __class_create(struct module *owner, 515 const char *name, 516 struct lock_class_key *key); 517 extern void class_destroy(struct class *cls); 518 519 /* This is a #define to keep the compiler from merging different 520 * instances of the __key variable */ 521 #define class_create(owner, name) \ 522 ({ \ 523 static struct lock_class_key __key; \ 524 __class_create(owner, name, &__key); \ 525 }) 526 527 /* 528 * The type of device, "struct device" is embedded in. A class 529 * or bus can contain devices of different types 530 * like "partitions" and "disks", "mouse" and "event". 531 * This identifies the device type and carries type-specific 532 * information, equivalent to the kobj_type of a kobject. 533 * If "name" is specified, the uevent will contain it in 534 * the DEVTYPE variable. 535 */ 536 struct device_type { 537 const char *name; 538 const struct attribute_group **groups; 539 int (*uevent)(struct device *dev, struct kobj_uevent_env *env); 540 char *(*devnode)(struct device *dev, umode_t *mode, 541 kuid_t *uid, kgid_t *gid); 542 void (*release)(struct device *dev); 543 544 const struct dev_pm_ops *pm; 545 }; 546 547 /* interface for exporting device attributes */ 548 struct device_attribute { 549 struct attribute attr; 550 ssize_t (*show)(struct device *dev, struct device_attribute *attr, 551 char *buf); 552 ssize_t (*store)(struct device *dev, struct device_attribute *attr, 553 const char *buf, size_t count); 554 }; 555 556 struct dev_ext_attribute { 557 struct device_attribute attr; 558 void *var; 559 }; 560 561 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr, 562 char *buf); 563 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr, 564 const char *buf, size_t count); 565 ssize_t device_show_int(struct device *dev, struct device_attribute *attr, 566 char *buf); 567 ssize_t device_store_int(struct device *dev, struct device_attribute *attr, 568 const char *buf, size_t count); 569 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, 570 char *buf); 571 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, 572 const char *buf, size_t count); 573 574 #define DEVICE_ATTR(_name, _mode, _show, _store) \ 575 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) 576 #define DEVICE_ATTR_RW(_name) \ 577 struct device_attribute dev_attr_##_name = __ATTR_RW(_name) 578 #define DEVICE_ATTR_RO(_name) \ 579 struct device_attribute dev_attr_##_name = __ATTR_RO(_name) 580 #define DEVICE_ATTR_WO(_name) \ 581 struct device_attribute 
dev_attr_##_name = __ATTR_WO(_name) 582 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \ 583 struct dev_ext_attribute dev_attr_##_name = \ 584 { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) } 585 #define DEVICE_INT_ATTR(_name, _mode, _var) \ 586 struct dev_ext_attribute dev_attr_##_name = \ 587 { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) } 588 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \ 589 struct dev_ext_attribute dev_attr_##_name = \ 590 { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) } 591 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \ 592 struct device_attribute dev_attr_##_name = \ 593 __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) 594 595 extern int device_create_file(struct device *device, 596 const struct device_attribute *entry); 597 extern void device_remove_file(struct device *dev, 598 const struct device_attribute *attr); 599 extern bool device_remove_file_self(struct device *dev, 600 const struct device_attribute *attr); 601 extern int __must_check device_create_bin_file(struct device *dev, 602 const struct bin_attribute *attr); 603 extern void device_remove_bin_file(struct device *dev, 604 const struct bin_attribute *attr); 605 606 /* device resource management */ 607 typedef void (*dr_release_t)(struct device *dev, void *res); 608 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); 609 610 #ifdef CONFIG_DEBUG_DEVRES 611 extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, 612 int nid, const char *name) __malloc; 613 #define devres_alloc(release, size, gfp) \ 614 __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) 615 #define devres_alloc_node(release, size, gfp, nid) \ 616 __devres_alloc_node(release, size, gfp, nid, #release) 617 #else 618 extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, 619 int nid) __malloc; 620 static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp) 621 { 622 return devres_alloc_node(release, size, gfp, NUMA_NO_NODE); 623 } 624 #endif 625 626 extern void devres_for_each_res(struct device *dev, dr_release_t release, 627 dr_match_t match, void *match_data, 628 void (*fn)(struct device *, void *, void *), 629 void *data); 630 extern void devres_free(void *res); 631 extern void devres_add(struct device *dev, void *res); 632 extern void *devres_find(struct device *dev, dr_release_t release, 633 dr_match_t match, void *match_data); 634 extern void *devres_get(struct device *dev, void *new_res, 635 dr_match_t match, void *match_data); 636 extern void *devres_remove(struct device *dev, dr_release_t release, 637 dr_match_t match, void *match_data); 638 extern int devres_destroy(struct device *dev, dr_release_t release, 639 dr_match_t match, void *match_data); 640 extern int devres_release(struct device *dev, dr_release_t release, 641 dr_match_t match, void *match_data); 642 643 /* devres group */ 644 extern void * __must_check devres_open_group(struct device *dev, void *id, 645 gfp_t gfp); 646 extern void devres_close_group(struct device *dev, void *id); 647 extern void devres_remove_group(struct device *dev, void *id); 648 extern int devres_release_group(struct device *dev, void *id); 649 650 /* managed devm_k.alloc/kfree for device drivers */ 651 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc; 652 extern __printf(3, 0) 653 char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, 654 va_list ap) __malloc; 655 extern 
__printf(3, 4) 656 char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc; 657 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) 658 { 659 return devm_kmalloc(dev, size, gfp | __GFP_ZERO); 660 } 661 static inline void *devm_kmalloc_array(struct device *dev, 662 size_t n, size_t size, gfp_t flags) 663 { 664 if (size != 0 && n > SIZE_MAX / size) 665 return NULL; 666 return devm_kmalloc(dev, n * size, flags); 667 } 668 static inline void *devm_kcalloc(struct device *dev, 669 size_t n, size_t size, gfp_t flags) 670 { 671 return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); 672 } 673 extern void devm_kfree(struct device *dev, void *p); 674 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; 675 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, 676 gfp_t gfp); 677 678 extern unsigned long devm_get_free_pages(struct device *dev, 679 gfp_t gfp_mask, unsigned int order); 680 extern void devm_free_pages(struct device *dev, unsigned long addr); 681 682 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res); 683 684 /* allows to add/remove a custom action to devres stack */ 685 int devm_add_action(struct device *dev, void (*action)(void *), void *data); 686 void devm_remove_action(struct device *dev, void (*action)(void *), void *data); 687 688 static inline int devm_add_action_or_reset(struct device *dev, 689 void (*action)(void *), void *data) 690 { 691 int ret; 692 693 ret = devm_add_action(dev, action, data); 694 if (ret) 695 action(data); 696 697 return ret; 698 } 699 700 struct device_dma_parameters { 701 /* 702 * a low level driver may set these to teach IOMMU code about 703 * sg limitations. 704 */ 705 unsigned int max_segment_size; 706 unsigned long segment_boundary_mask; 707 }; 708 709 /** 710 * struct device - The basic device structure 711 * @parent: The device's "parent" device, the device to which it is attached. 712 * In most cases, a parent device is some sort of bus or host 713 * controller. If parent is NULL, the device, is a top-level device, 714 * which is not usually what you want. 715 * @p: Holds the private data of the driver core portions of the device. 716 * See the comment of the struct device_private for detail. 717 * @kobj: A top-level, abstract class from which other classes are derived. 718 * @init_name: Initial name of the device. 719 * @type: The type of device. 720 * This identifies the device type and carries type-specific 721 * information. 722 * @mutex: Mutex to synchronize calls to its driver. 723 * @bus: Type of bus device is on. 724 * @driver: Which driver has allocated this 725 * @platform_data: Platform data specific to the device. 726 * Example: For devices on custom boards, as typical of embedded 727 * and SOC based hardware, Linux often uses platform_data to point 728 * to board-specific structures describing devices and how they 729 * are wired. That can include what ports are available, chip 730 * variants, which GPIO pins act in what additional roles, and so 731 * on. This shrinks the "Board Support Packages" (BSPs) and 732 * minimizes board-specific #ifdefs in drivers. 733 * @driver_data: Private pointer for driver specific info. 734 * @power: For device power management. 735 * See Documentation/power/devices.txt for details. 
736 * @pm_domain: Provide callbacks that are executed during system suspend, 737 * hibernation, system resume and during runtime PM transitions 738 * along with subsystem-level and driver-level callbacks. 739 * @pins: For device pin management. 740 * See Documentation/pinctrl.txt for details. 741 * @msi_list: Hosts MSI descriptors 742 * @msi_domain: The generic MSI domain this device is using. 743 * @numa_node: NUMA node this device is close to. 744 * @dma_mask: Dma mask (if dma'ble device). 745 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all 746 * hardware supports 64-bit addresses for consistent allocations 747 * such descriptors. 748 * @dma_pfn_offset: offset of DMA memory range relatively of RAM 749 * @dma_parms: A low level driver may set these to teach IOMMU code about 750 * segment limitations. 751 * @dma_pools: Dma pools (if dma'ble device). 752 * @dma_mem: Internal for coherent mem override. 753 * @cma_area: Contiguous memory area for dma allocations 754 * @archdata: For arch-specific additions. 755 * @of_node: Associated device tree node. 756 * @fwnode: Associated device node supplied by platform firmware. 757 * @devt: For creating the sysfs "dev". 758 * @id: device instance 759 * @devres_lock: Spinlock to protect the resource of the device. 760 * @devres_head: The resources list of the device. 761 * @knode_class: The node used to add the device to the class list. 762 * @class: The class of the device. 763 * @groups: Optional attribute groups. 764 * @release: Callback to free the device after all references have 765 * gone away. This should be set by the allocator of the 766 * device (i.e. the bus driver that discovered the device). 767 * @iommu_group: IOMMU group the device belongs to. 768 * 769 * @offline_disabled: If set, the device is permanently online. 770 * @offline: Set after successful invocation of bus type's .offline(). 771 * 772 * At the lowest level, every device in a Linux system is represented by an 773 * instance of struct device. The device structure contains the information 774 * that the device model core needs to model the system. Most subsystems, 775 * however, track additional information about the devices they host. As a 776 * result, it is rare for devices to be represented by bare device structures; 777 * instead, that structure, like kobject structures, is usually embedded within 778 * a higher-level representation of the device. 779 */ 780 struct device { 781 struct device *parent; 782 783 struct device_private *p; 784 785 struct kobject kobj; 786 const char *init_name; /* initial name of the device */ 787 const struct device_type *type; 788 789 struct mutex mutex; /* mutex to synchronize calls to 790 * its driver. 
791 */ 792 793 struct bus_type *bus; /* type of bus device is on */ 794 struct device_driver *driver; /* which driver has allocated this 795 device */ 796 void *platform_data; /* Platform specific data, device 797 core doesn't touch it */ 798 void *driver_data; /* Driver data, set and get with 799 dev_set/get_drvdata */ 800 struct dev_pm_info power; 801 struct dev_pm_domain *pm_domain; 802 803 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 804 struct irq_domain *msi_domain; 805 #endif 806 #ifdef CONFIG_PINCTRL 807 struct dev_pin_info *pins; 808 #endif 809 #ifdef CONFIG_GENERIC_MSI_IRQ 810 struct list_head msi_list; 811 #endif 812 813 #ifdef CONFIG_NUMA 814 int numa_node; /* NUMA node this device is close to */ 815 #endif 816 u64 *dma_mask; /* dma mask (if dma'able device) */ 817 u64 coherent_dma_mask;/* Like dma_mask, but for 818 alloc_coherent mappings as 819 not all hardware supports 820 64 bit addresses for consistent 821 allocations such descriptors. */ 822 unsigned long dma_pfn_offset; 823 824 struct device_dma_parameters *dma_parms; 825 826 struct list_head dma_pools; /* dma pools (if dma'ble) */ 827 828 struct dma_coherent_mem *dma_mem; /* internal for coherent mem 829 override */ 830 #ifdef CONFIG_DMA_CMA 831 struct cma *cma_area; /* contiguous memory area for dma 832 allocations */ 833 #endif 834 /* arch specific additions */ 835 struct dev_archdata archdata; 836 837 struct device_node *of_node; /* associated device tree node */ 838 struct fwnode_handle *fwnode; /* firmware device node */ 839 840 dev_t devt; /* dev_t, creates the sysfs "dev" */ 841 u32 id; /* device instance */ 842 843 spinlock_t devres_lock; 844 struct list_head devres_head; 845 846 struct klist_node knode_class; 847 struct class *class; 848 const struct attribute_group **groups; /* optional groups */ 849 850 void (*release)(struct device *dev); 851 struct iommu_group *iommu_group; 852 853 bool offline_disabled:1; 854 bool offline:1; 855 }; 856 857 static inline struct device *kobj_to_dev(struct kobject *kobj) 858 { 859 return container_of(kobj, struct device, kobj); 860 } 861 862 /* Get the wakeup routines, which depend on struct device */ 863 #include <linux/pm_wakeup.h> 864 865 static inline const char *dev_name(const struct device *dev) 866 { 867 /* Use the init name until the kobject becomes available */ 868 if (dev->init_name) 869 return dev->init_name; 870 871 return kobject_name(&dev->kobj); 872 } 873 874 extern __printf(2, 3) 875 int dev_set_name(struct device *dev, const char *name, ...); 876 877 #ifdef CONFIG_NUMA 878 static inline int dev_to_node(struct device *dev) 879 { 880 return dev->numa_node; 881 } 882 static inline void set_dev_node(struct device *dev, int node) 883 { 884 dev->numa_node = node; 885 } 886 #else 887 static inline int dev_to_node(struct device *dev) 888 { 889 return -1; 890 } 891 static inline void set_dev_node(struct device *dev, int node) 892 { 893 } 894 #endif 895 896 static inline struct irq_domain *dev_get_msi_domain(const struct device *dev) 897 { 898 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 899 return dev->msi_domain; 900 #else 901 return NULL; 902 #endif 903 } 904 905 static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d) 906 { 907 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 908 dev->msi_domain = d; 909 #endif 910 } 911 912 static inline void *dev_get_drvdata(const struct device *dev) 913 { 914 return dev->driver_data; 915 } 916 917 static inline void dev_set_drvdata(struct device *dev, void *data) 918 { 919 dev->driver_data = data; 920 } 921 922 static inline struct 
pm_subsys_data *dev_to_psd(struct device *dev) 923 { 924 return dev ? dev->power.subsys_data : NULL; 925 } 926 927 static inline unsigned int dev_get_uevent_suppress(const struct device *dev) 928 { 929 return dev->kobj.uevent_suppress; 930 } 931 932 static inline void dev_set_uevent_suppress(struct device *dev, int val) 933 { 934 dev->kobj.uevent_suppress = val; 935 } 936 937 static inline int device_is_registered(struct device *dev) 938 { 939 return dev->kobj.state_in_sysfs; 940 } 941 942 static inline void device_enable_async_suspend(struct device *dev) 943 { 944 if (!dev->power.is_prepared) 945 dev->power.async_suspend = true; 946 } 947 948 static inline void device_disable_async_suspend(struct device *dev) 949 { 950 if (!dev->power.is_prepared) 951 dev->power.async_suspend = false; 952 } 953 954 static inline bool device_async_suspend_enabled(struct device *dev) 955 { 956 return !!dev->power.async_suspend; 957 } 958 959 static inline void dev_pm_syscore_device(struct device *dev, bool val) 960 { 961 #ifdef CONFIG_PM_SLEEP 962 dev->power.syscore = val; 963 #endif 964 } 965 966 static inline void device_lock(struct device *dev) 967 { 968 mutex_lock(&dev->mutex); 969 } 970 971 static inline int device_lock_interruptible(struct device *dev) 972 { 973 return mutex_lock_interruptible(&dev->mutex); 974 } 975 976 static inline int device_trylock(struct device *dev) 977 { 978 return mutex_trylock(&dev->mutex); 979 } 980 981 static inline void device_unlock(struct device *dev) 982 { 983 mutex_unlock(&dev->mutex); 984 } 985 986 static inline void device_lock_assert(struct device *dev) 987 { 988 lockdep_assert_held(&dev->mutex); 989 } 990 991 static inline struct device_node *dev_of_node(struct device *dev) 992 { 993 if (!IS_ENABLED(CONFIG_OF)) 994 return NULL; 995 return dev->of_node; 996 } 997 998 void driver_init(void); 999 1000 /* 1001 * High level routines for use by the bus drivers 1002 */ 1003 extern int __must_check device_register(struct device *dev); 1004 extern void device_unregister(struct device *dev); 1005 extern void device_initialize(struct device *dev); 1006 extern int __must_check device_add(struct device *dev); 1007 extern void device_del(struct device *dev); 1008 extern int device_for_each_child(struct device *dev, void *data, 1009 int (*fn)(struct device *dev, void *data)); 1010 extern int device_for_each_child_reverse(struct device *dev, void *data, 1011 int (*fn)(struct device *dev, void *data)); 1012 extern struct device *device_find_child(struct device *dev, void *data, 1013 int (*match)(struct device *dev, void *data)); 1014 extern int device_rename(struct device *dev, const char *new_name); 1015 extern int device_move(struct device *dev, struct device *new_parent, 1016 enum dpm_order dpm_order); 1017 extern const char *device_get_devnode(struct device *dev, 1018 umode_t *mode, kuid_t *uid, kgid_t *gid, 1019 const char **tmp); 1020 1021 static inline bool device_supports_offline(struct device *dev) 1022 { 1023 return dev->bus && dev->bus->offline && dev->bus->online; 1024 } 1025 1026 extern void lock_device_hotplug(void); 1027 extern void unlock_device_hotplug(void); 1028 extern int lock_device_hotplug_sysfs(void); 1029 extern int device_offline(struct device *dev); 1030 extern int device_online(struct device *dev); 1031 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); 1032 extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); 1033 1034 /* 1035 * Root device objects for grouping under /sys/devices 1036 */ 1037 
extern struct device *__root_device_register(const char *name, 1038 struct module *owner); 1039 1040 /* This is a macro to avoid include problems with THIS_MODULE */ 1041 #define root_device_register(name) \ 1042 __root_device_register(name, THIS_MODULE) 1043 1044 extern void root_device_unregister(struct device *root); 1045 1046 static inline void *dev_get_platdata(const struct device *dev) 1047 { 1048 return dev->platform_data; 1049 } 1050 1051 /* 1052 * Manual binding of a device to driver. See drivers/base/bus.c 1053 * for information on use. 1054 */ 1055 extern int __must_check device_bind_driver(struct device *dev); 1056 extern void device_release_driver(struct device *dev); 1057 extern int __must_check device_attach(struct device *dev); 1058 extern int __must_check driver_attach(struct device_driver *drv); 1059 extern void device_initial_probe(struct device *dev); 1060 extern int __must_check device_reprobe(struct device *dev); 1061 1062 extern bool device_is_bound(struct device *dev); 1063 1064 /* 1065 * Easy functions for dynamically creating devices on the fly 1066 */ 1067 extern __printf(5, 0) 1068 struct device *device_create_vargs(struct class *cls, struct device *parent, 1069 dev_t devt, void *drvdata, 1070 const char *fmt, va_list vargs); 1071 extern __printf(5, 6) 1072 struct device *device_create(struct class *cls, struct device *parent, 1073 dev_t devt, void *drvdata, 1074 const char *fmt, ...); 1075 extern __printf(6, 7) 1076 struct device *device_create_with_groups(struct class *cls, 1077 struct device *parent, dev_t devt, void *drvdata, 1078 const struct attribute_group **groups, 1079 const char *fmt, ...); 1080 extern void device_destroy(struct class *cls, dev_t devt); 1081 1082 /* 1083 * Platform "fixup" functions - allow the platform to have their say 1084 * about devices and actions that the general device layer doesn't 1085 * know about. 1086 */ 1087 /* Notify platform of device discovery */ 1088 extern int (*platform_notify)(struct device *dev); 1089 1090 extern int (*platform_notify_remove)(struct device *dev); 1091 1092 1093 /* 1094 * get_device - atomically increment the reference count for the device. 1095 * 1096 */ 1097 extern struct device *get_device(struct device *dev); 1098 extern void put_device(struct device *dev); 1099 1100 #ifdef CONFIG_DEVTMPFS 1101 extern int devtmpfs_create_node(struct device *dev); 1102 extern int devtmpfs_delete_node(struct device *dev); 1103 extern int devtmpfs_mount(const char *mntdir); 1104 #else 1105 static inline int devtmpfs_create_node(struct device *dev) { return 0; } 1106 static inline int devtmpfs_delete_node(struct device *dev) { return 0; } 1107 static inline int devtmpfs_mount(const char *mountpoint) { return 0; } 1108 #endif 1109 1110 /* drivers/base/power/shutdown.c */ 1111 extern void device_shutdown(void); 1112 1113 /* debugging and troubleshooting/diagnostic helpers. 
*/ 1114 extern const char *dev_driver_string(const struct device *dev); 1115 1116 1117 #ifdef CONFIG_PRINTK 1118 1119 extern __printf(3, 0) 1120 int dev_vprintk_emit(int level, const struct device *dev, 1121 const char *fmt, va_list args); 1122 extern __printf(3, 4) 1123 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...); 1124 1125 extern __printf(3, 4) 1126 void dev_printk(const char *level, const struct device *dev, 1127 const char *fmt, ...); 1128 extern __printf(2, 3) 1129 void dev_emerg(const struct device *dev, const char *fmt, ...); 1130 extern __printf(2, 3) 1131 void dev_alert(const struct device *dev, const char *fmt, ...); 1132 extern __printf(2, 3) 1133 void dev_crit(const struct device *dev, const char *fmt, ...); 1134 extern __printf(2, 3) 1135 void dev_err(const struct device *dev, const char *fmt, ...); 1136 extern __printf(2, 3) 1137 void dev_warn(const struct device *dev, const char *fmt, ...); 1138 extern __printf(2, 3) 1139 void dev_notice(const struct device *dev, const char *fmt, ...); 1140 extern __printf(2, 3) 1141 void _dev_info(const struct device *dev, const char *fmt, ...); 1142 1143 #else 1144 1145 static inline __printf(3, 0) 1146 int dev_vprintk_emit(int level, const struct device *dev, 1147 const char *fmt, va_list args) 1148 { return 0; } 1149 static inline __printf(3, 4) 1150 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) 1151 { return 0; } 1152 1153 static inline void __dev_printk(const char *level, const struct device *dev, 1154 struct va_format *vaf) 1155 {} 1156 static inline __printf(3, 4) 1157 void dev_printk(const char *level, const struct device *dev, 1158 const char *fmt, ...) 1159 {} 1160 1161 static inline __printf(2, 3) 1162 void dev_emerg(const struct device *dev, const char *fmt, ...) 1163 {} 1164 static inline __printf(2, 3) 1165 void dev_crit(const struct device *dev, const char *fmt, ...) 1166 {} 1167 static inline __printf(2, 3) 1168 void dev_alert(const struct device *dev, const char *fmt, ...) 1169 {} 1170 static inline __printf(2, 3) 1171 void dev_err(const struct device *dev, const char *fmt, ...) 1172 {} 1173 static inline __printf(2, 3) 1174 void dev_warn(const struct device *dev, const char *fmt, ...) 1175 {} 1176 static inline __printf(2, 3) 1177 void dev_notice(const struct device *dev, const char *fmt, ...) 1178 {} 1179 static inline __printf(2, 3) 1180 void _dev_info(const struct device *dev, const char *fmt, ...) 1181 {} 1182 1183 #endif 1184 1185 /* 1186 * Stupid hackaround for existing uses of non-printk uses dev_info 1187 * 1188 * Note that the definition of dev_info below is actually _dev_info 1189 * and a macro is used to avoid redefining dev_info 1190 */ 1191 1192 #define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg) 1193 1194 #if defined(CONFIG_DYNAMIC_DEBUG) 1195 #define dev_dbg(dev, format, ...) \ 1196 do { \ 1197 dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \ 1198 } while (0) 1199 #elif defined(DEBUG) 1200 #define dev_dbg(dev, format, arg...) \ 1201 dev_printk(KERN_DEBUG, dev, format, ##arg) 1202 #else 1203 #define dev_dbg(dev, format, arg...) \ 1204 ({ \ 1205 if (0) \ 1206 dev_printk(KERN_DEBUG, dev, format, ##arg); \ 1207 }) 1208 #endif 1209 1210 #ifdef CONFIG_PRINTK 1211 #define dev_level_once(dev_level, dev, fmt, ...) 
\ 1212 do { \ 1213 static bool __print_once __read_mostly; \ 1214 \ 1215 if (!__print_once) { \ 1216 __print_once = true; \ 1217 dev_level(dev, fmt, ##__VA_ARGS__); \ 1218 } \ 1219 } while (0) 1220 #else 1221 #define dev_level_once(dev_level, dev, fmt, ...) \ 1222 do { \ 1223 if (0) \ 1224 dev_level(dev, fmt, ##__VA_ARGS__); \ 1225 } while (0) 1226 #endif 1227 1228 #define dev_emerg_once(dev, fmt, ...) \ 1229 dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__) 1230 #define dev_alert_once(dev, fmt, ...) \ 1231 dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__) 1232 #define dev_crit_once(dev, fmt, ...) \ 1233 dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__) 1234 #define dev_err_once(dev, fmt, ...) \ 1235 dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__) 1236 #define dev_warn_once(dev, fmt, ...) \ 1237 dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__) 1238 #define dev_notice_once(dev, fmt, ...) \ 1239 dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__) 1240 #define dev_info_once(dev, fmt, ...) \ 1241 dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) 1242 #define dev_dbg_once(dev, fmt, ...) \ 1243 dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__) 1244 1245 #define dev_level_ratelimited(dev_level, dev, fmt, ...) \ 1246 do { \ 1247 static DEFINE_RATELIMIT_STATE(_rs, \ 1248 DEFAULT_RATELIMIT_INTERVAL, \ 1249 DEFAULT_RATELIMIT_BURST); \ 1250 if (__ratelimit(&_rs)) \ 1251 dev_level(dev, fmt, ##__VA_ARGS__); \ 1252 } while (0) 1253 1254 #define dev_emerg_ratelimited(dev, fmt, ...) \ 1255 dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__) 1256 #define dev_alert_ratelimited(dev, fmt, ...) \ 1257 dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__) 1258 #define dev_crit_ratelimited(dev, fmt, ...) \ 1259 dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__) 1260 #define dev_err_ratelimited(dev, fmt, ...) \ 1261 dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__) 1262 #define dev_warn_ratelimited(dev, fmt, ...) \ 1263 dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__) 1264 #define dev_notice_ratelimited(dev, fmt, ...) \ 1265 dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__) 1266 #define dev_info_ratelimited(dev, fmt, ...) \ 1267 dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__) 1268 #if defined(CONFIG_DYNAMIC_DEBUG) 1269 /* descriptor check is first to prevent flooding with "callbacks suppressed" */ 1270 #define dev_dbg_ratelimited(dev, fmt, ...) \ 1271 do { \ 1272 static DEFINE_RATELIMIT_STATE(_rs, \ 1273 DEFAULT_RATELIMIT_INTERVAL, \ 1274 DEFAULT_RATELIMIT_BURST); \ 1275 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ 1276 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ 1277 __ratelimit(&_rs)) \ 1278 __dynamic_dev_dbg(&descriptor, dev, fmt, \ 1279 ##__VA_ARGS__); \ 1280 } while (0) 1281 #elif defined(DEBUG) 1282 #define dev_dbg_ratelimited(dev, fmt, ...) \ 1283 do { \ 1284 static DEFINE_RATELIMIT_STATE(_rs, \ 1285 DEFAULT_RATELIMIT_INTERVAL, \ 1286 DEFAULT_RATELIMIT_BURST); \ 1287 if (__ratelimit(&_rs)) \ 1288 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ 1289 } while (0) 1290 #else 1291 #define dev_dbg_ratelimited(dev, fmt, ...) \ 1292 do { \ 1293 if (0) \ 1294 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ 1295 } while (0) 1296 #endif 1297 1298 #ifdef VERBOSE_DEBUG 1299 #define dev_vdbg dev_dbg 1300 #else 1301 #define dev_vdbg(dev, format, arg...) 
\ 1302 ({ \ 1303 if (0) \ 1304 dev_printk(KERN_DEBUG, dev, format, ##arg); \ 1305 }) 1306 #endif 1307 1308 /* 1309 * dev_WARN*() acts like dev_printk(), but with the key difference of 1310 * using WARN/WARN_ONCE to include file/line information and a backtrace. 1311 */ 1312 #define dev_WARN(dev, format, arg...) \ 1313 WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg); 1314 1315 #define dev_WARN_ONCE(dev, condition, format, arg...) \ 1316 WARN_ONCE(condition, "%s %s: " format, \ 1317 dev_driver_string(dev), dev_name(dev), ## arg) 1318 1319 /* Create alias, so I can be autoloaded. */ 1320 #define MODULE_ALIAS_CHARDEV(major,minor) \ 1321 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) 1322 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \ 1323 MODULE_ALIAS("char-major-" __stringify(major) "-*") 1324 1325 #ifdef CONFIG_SYSFS_DEPRECATED 1326 extern long sysfs_deprecated; 1327 #else 1328 #define sysfs_deprecated 0 1329 #endif 1330 1331 /** 1332 * module_driver() - Helper macro for drivers that don't do anything 1333 * special in module init/exit. This eliminates a lot of boilerplate. 1334 * Each module may only use this macro once, and calling it replaces 1335 * module_init() and module_exit(). 1336 * 1337 * @__driver: driver name 1338 * @__register: register function for this driver type 1339 * @__unregister: unregister function for this driver type 1340 * @...: Additional arguments to be passed to __register and __unregister. 1341 * 1342 * Use this macro to construct bus specific macros for registering 1343 * drivers, and do not use it on its own. 1344 */ 1345 #define module_driver(__driver, __register, __unregister, ...) \ 1346 static int __init __driver##_init(void) \ 1347 { \ 1348 return __register(&(__driver) , ##__VA_ARGS__); \ 1349 } \ 1350 module_init(__driver##_init); \ 1351 static void __exit __driver##_exit(void) \ 1352 { \ 1353 __unregister(&(__driver) , ##__VA_ARGS__); \ 1354 } \ 1355 module_exit(__driver##_exit); 1356 1357 /** 1358 * builtin_driver() - Helper macro for drivers that don't do anything 1359 * special in init and have no exit. This eliminates some boilerplate. 1360 * Each driver may only use this macro once, and calling it replaces 1361 * device_initcall (or in some cases, the legacy __initcall). This is 1362 * meant to be a direct parallel of module_driver() above but without 1363 * the __exit stuff that is not used for builtin cases. 1364 * 1365 * @__driver: driver name 1366 * @__register: register function for this driver type 1367 * @...: Additional arguments to be passed to __register 1368 * 1369 * Use this macro to construct bus specific macros for registering 1370 * drivers, and do not use it on its own. 1371 */ 1372 #define builtin_driver(__driver, __register, ...) \ 1373 static int __init __driver##_init(void) \ 1374 { \ 1375 return __register(&(__driver) , ##__VA_ARGS__); \ 1376 } \ 1377 device_initcall(__driver##_init); 1378 1379 #endif /* _DEVICE_H_ */
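The dev_*() helpers above are the standard way for drivers to log messages tied to a specific struct device. As a usage illustration (not part of the header), here is a minimal sketch combining several of the levels; the self-test scenario, the report_selftest() name and its status codes are assumptions made up for the example.

#include <linux/device.h>
#include <linux/errno.h>

/* Illustrative helper: report the outcome of a hypothetical hardware
 * self-test through the dev_*() logging levels declared above.
 */
static int report_selftest(struct device *dev, int status)
{
	/* Compiled out, or routed through dynamic debug, unless enabled */
	dev_dbg(dev, "self-test finished, raw status %#x\n", status);

	if (status == 0) {
		dev_info(dev, "self-test passed\n");
		return 0;
	}

	/* Rate-limited so a flapping device cannot flood the log */
	dev_warn_ratelimited(dev, "self-test reported status %#x\n", status);

	/* One-shot warning with file/line information and a backtrace */
	dev_WARN_ONCE(dev, status < 0, "self-test returned a negative status\n");

	dev_err(dev, "self-test failed\n");
	return -EIO;
}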
1 /* ------------------------------------------------------------------------- */ 2 /* */ 3 /* i2c.h - definitions for the i2c-bus interface */ 4 /* */ 5 /* ------------------------------------------------------------------------- */ 6 /* Copyright (C) 1995-2000 Simon G. Vogl 7 8 This program is free software; you can redistribute it and/or modify 9 it under the terms of the GNU General Public License as published by 10 the Free Software Foundation; either version 2 of the License, or 11 (at your option) any later version. 12 13 This program is distributed in the hope that it will be useful, 14 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 GNU General Public License for more details. 17 18 You should have received a copy of the GNU General Public License 19 along with this program; if not, write to the Free Software 20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, 21 MA 02110-1301 USA. */ 22 /* ------------------------------------------------------------------------- */ 23 24 /* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and 25 Frodo Looijaard <frodol@dds.nl> */ 26 #ifndef _LINUX_I2C_H 27 #define _LINUX_I2C_H 28 29 #include <linux/mod_devicetable.h> 30 #include <linux/device.h> /* for struct device */ 31 #include <linux/sched.h> /* for completion */ 32 #include <linux/mutex.h> 33 #include <linux/of.h> /* for struct device_node */ 34 #include <linux/swab.h> /* for swab16 */ 35 #include <uapi/linux/i2c.h> 36 37 extern struct bus_type i2c_bus_type; 38 extern struct device_type i2c_adapter_type; 39 40 /* --- General options ------------------------------------------------ */ 41 42 struct i2c_msg; 43 struct i2c_algorithm; 44 struct i2c_adapter; 45 struct i2c_client; 46 struct i2c_driver; 47 union i2c_smbus_data; 48 struct i2c_board_info; 49 enum i2c_slave_event; 50 typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *); 51 52 struct module; 53 54 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 55 /* 56 * The master routines are the ones normally used to transmit data to devices 57 * on a bus (or read from them). Apart from two basic transfer functions to 58 * transmit one message at a time, a more complex version can be used to 59 * transmit an arbitrary number of messages without interruption. 60 * @count must be be less than 64k since msg.len is u16. 61 */ 62 extern int i2c_master_send(const struct i2c_client *client, const char *buf, 63 int count); 64 extern int i2c_master_recv(const struct i2c_client *client, char *buf, 65 int count); 66 67 /* Transfer num messages. 68 */ 69 extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, 70 int num); 71 /* Unlocked flavor */ 72 extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, 73 int num); 74 75 /* This is the very generalized SMBus access routine. You probably do not 76 want to use this, though; one of the functions below may be much easier, 77 and probably just as fast. 78 Note that we use i2c_adapter here, because you do not need a specific 79 smbus adapter to call this function. */ 80 extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, 81 unsigned short flags, char read_write, u8 command, 82 int size, union i2c_smbus_data *data); 83 84 /* Now follow the 'nice' access routines. These also document the calling 85 conventions of i2c_smbus_xfer. 
*/ 86 87 extern s32 i2c_smbus_read_byte(const struct i2c_client *client); 88 extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value); 89 extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client, 90 u8 command); 91 extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client, 92 u8 command, u8 value); 93 extern s32 i2c_smbus_read_word_data(const struct i2c_client *client, 94 u8 command); 95 extern s32 i2c_smbus_write_word_data(const struct i2c_client *client, 96 u8 command, u16 value); 97 98 static inline s32 99 i2c_smbus_read_word_swapped(const struct i2c_client *client, u8 command) 100 { 101 s32 value = i2c_smbus_read_word_data(client, command); 102 103 return (value < 0) ? value : swab16(value); 104 } 105 106 static inline s32 107 i2c_smbus_write_word_swapped(const struct i2c_client *client, 108 u8 command, u16 value) 109 { 110 return i2c_smbus_write_word_data(client, command, swab16(value)); 111 } 112 113 /* Returns the number of read bytes */ 114 extern s32 i2c_smbus_read_block_data(const struct i2c_client *client, 115 u8 command, u8 *values); 116 extern s32 i2c_smbus_write_block_data(const struct i2c_client *client, 117 u8 command, u8 length, const u8 *values); 118 /* Returns the number of read bytes */ 119 extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, 120 u8 command, u8 length, u8 *values); 121 extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, 122 u8 command, u8 length, 123 const u8 *values); 124 extern s32 125 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client, 126 u8 command, u8 length, u8 *values); 127 #endif /* I2C */ 128 129 enum i2c_alert_protocol { 130 I2C_PROTOCOL_SMBUS_ALERT, 131 I2C_PROTOCOL_SMBUS_HOST_NOTIFY, 132 }; 133 134 /** 135 * struct i2c_driver - represent an I2C device driver 136 * @class: What kind of i2c device we instantiate (for detect) 137 * @attach_adapter: Callback for bus addition (deprecated) 138 * @probe: Callback for device binding 139 * @remove: Callback for device unbinding 140 * @shutdown: Callback for device shutdown 141 * @alert: Alert callback, for example for the SMBus alert protocol 142 * @command: Callback for bus-wide signaling (optional) 143 * @driver: Device driver model driver 144 * @id_table: List of I2C devices supported by this driver 145 * @detect: Callback for device detection 146 * @address_list: The I2C addresses to probe (for detect) 147 * @clients: List of detected clients we created (for i2c-core use only) 148 * 149 * The driver.owner field should be set to the module owner of this driver. 150 * The driver.name field should be set to the name of this driver. 151 * 152 * For automatic device detection, both @detect and @address_list must 153 * be defined. @class should also be set, otherwise only devices forced 154 * with module parameters will be created. The detect function must 155 * fill at least the name field of the i2c_board_info structure it is 156 * handed upon successful detection, and possibly also the flags field. 157 * 158 * If @detect is missing, the driver will still work fine for enumerated 159 * devices. Detected devices simply won't be supported. This is expected 160 * for the many I2C/SMBus devices which can't be detected reliably, and 161 * the ones which can always be enumerated in practice. 162 * 163 * The i2c_client structure which is handed to the @detect callback is 164 * not a real i2c_client. It is initialized just enough so that you can 165 * call i2c_smbus_read_byte_data and friends on it. 
Don't do anything 166 * else with it. In particular, calling dev_dbg and friends on it is 167 * not allowed. 168 */ 169 struct i2c_driver { 170 unsigned int class; 171 172 /* Notifies the driver that a new bus has appeared. You should avoid 173 * using this, it will be removed in a near future. 174 */ 175 int (*attach_adapter)(struct i2c_adapter *) __deprecated; 176 177 /* Standard driver model interfaces */ 178 int (*probe)(struct i2c_client *, const struct i2c_device_id *); 179 int (*remove)(struct i2c_client *); 180 181 /* driver model interfaces that don't relate to enumeration */ 182 void (*shutdown)(struct i2c_client *); 183 184 /* Alert callback, for example for the SMBus alert protocol. 185 * The format and meaning of the data value depends on the protocol. 186 * For the SMBus alert protocol, there is a single bit of data passed 187 * as the alert response's low bit ("event flag"). 188 * For the SMBus Host Notify protocol, the data corresponds to the 189 * 16-bit payload data reported by the slave device acting as master. 190 */ 191 void (*alert)(struct i2c_client *, enum i2c_alert_protocol protocol, 192 unsigned int data); 193 194 /* a ioctl like command that can be used to perform specific functions 195 * with the device. 196 */ 197 int (*command)(struct i2c_client *client, unsigned int cmd, void *arg); 198 199 struct device_driver driver; 200 const struct i2c_device_id *id_table; 201 202 /* Device detection callback for automatic device creation */ 203 int (*detect)(struct i2c_client *, struct i2c_board_info *); 204 const unsigned short *address_list; 205 struct list_head clients; 206 }; 207 #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) 208 209 /** 210 * struct i2c_client - represent an I2C slave device 211 * @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address; 212 * I2C_CLIENT_PEC indicates it uses SMBus Packet Error Checking 213 * @addr: Address used on the I2C bus connected to the parent adapter. 214 * @name: Indicates the type of the device, usually a chip name that's 215 * generic enough to hide second-sourcing and compatible revisions. 216 * @adapter: manages the bus segment hosting this I2C device 217 * @dev: Driver model device node for the slave. 218 * @irq: indicates the IRQ generated by this device (if any) 219 * @detected: member of an i2c_driver.clients list or i2c-core's 220 * userspace_devices list 221 * @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter 222 * calls it to pass on slave events to the slave driver. 223 * 224 * An i2c_client identifies a single device (i.e. chip) connected to an 225 * i2c bus. The behaviour exposed to Linux is defined by the driver 226 * managing the device. 
227 */ 228 struct i2c_client { 229 unsigned short flags; /* div., see below */ 230 unsigned short addr; /* chip address - NOTE: 7bit */ 231 /* addresses are stored in the */ 232 /* _LOWER_ 7 bits */ 233 char name[I2C_NAME_SIZE]; 234 struct i2c_adapter *adapter; /* the adapter we sit on */ 235 struct device dev; /* the device structure */ 236 int irq; /* irq issued by device */ 237 struct list_head detected; 238 #if IS_ENABLED(CONFIG_I2C_SLAVE) 239 i2c_slave_cb_t slave_cb; /* callback for slave mode */ 240 #endif 241 }; 242 #define to_i2c_client(d) container_of(d, struct i2c_client, dev) 243 244 extern struct i2c_client *i2c_verify_client(struct device *dev); 245 extern struct i2c_adapter *i2c_verify_adapter(struct device *dev); 246 247 static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj) 248 { 249 struct device * const dev = container_of(kobj, struct device, kobj); 250 return to_i2c_client(dev); 251 } 252 253 static inline void *i2c_get_clientdata(const struct i2c_client *dev) 254 { 255 return dev_get_drvdata(&dev->dev); 256 } 257 258 static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) 259 { 260 dev_set_drvdata(&dev->dev, data); 261 } 262 263 /* I2C slave support */ 264 265 #if IS_ENABLED(CONFIG_I2C_SLAVE) 266 enum i2c_slave_event { 267 I2C_SLAVE_READ_REQUESTED, 268 I2C_SLAVE_WRITE_REQUESTED, 269 I2C_SLAVE_READ_PROCESSED, 270 I2C_SLAVE_WRITE_RECEIVED, 271 I2C_SLAVE_STOP, 272 }; 273 274 extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb); 275 extern int i2c_slave_unregister(struct i2c_client *client); 276 277 static inline int i2c_slave_event(struct i2c_client *client, 278 enum i2c_slave_event event, u8 *val) 279 { 280 return client->slave_cb(client, event, val); 281 } 282 #endif 283 284 /** 285 * struct i2c_board_info - template for device creation 286 * @type: chip type, to initialize i2c_client.name 287 * @flags: to initialize i2c_client.flags 288 * @addr: stored in i2c_client.addr 289 * @platform_data: stored in i2c_client.dev.platform_data 290 * @archdata: copied into i2c_client.dev.archdata 291 * @of_node: pointer to OpenFirmware device node 292 * @fwnode: device node supplied by the platform firmware 293 * @irq: stored in i2c_client.irq 294 * 295 * I2C doesn't actually support hardware probing, although controllers and 296 * devices may be able to use I2C_SMBUS_QUICK to tell whether or not there's 297 * a device at a given address. Drivers commonly need more information than 298 * that, such as chip type, configuration, associated IRQ, and so on. 299 * 300 * i2c_board_info is used to build tables of information listing I2C devices 301 * that are present. This information is used to grow the driver model tree. 302 * For mainboards this is done statically using i2c_register_board_info(); 303 * bus numbers identify adapters that aren't yet available. For add-on boards, 304 * i2c_new_device() does this dynamically with the adapter already known. 305 */ 306 struct i2c_board_info { 307 char type[I2C_NAME_SIZE]; 308 unsigned short flags; 309 unsigned short addr; 310 void *platform_data; 311 struct dev_archdata *archdata; 312 struct device_node *of_node; 313 struct fwnode_handle *fwnode; 314 int irq; 315 }; 316 317 /** 318 * I2C_BOARD_INFO - macro used to list an i2c device and its address 319 * @dev_type: identifies the device type 320 * @dev_addr: the device's address on the bus. 
321 * 322 * This macro initializes essential fields of a struct i2c_board_info, 323 * declaring what has been provided on a particular board. Optional 324 * fields (such as associated irq, or device-specific platform_data) 325 * are provided using conventional syntax. 326 */ 327 #define I2C_BOARD_INFO(dev_type, dev_addr) \ 328 .type = dev_type, .addr = (dev_addr) 329 330 331 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 332 /* Add-on boards should register/unregister their devices; e.g. a board 333 * with integrated I2C, a config eeprom, sensors, and a codec that's 334 * used in conjunction with the primary hardware. 335 */ 336 extern struct i2c_client * 337 i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); 338 339 /* If you don't know the exact address of an I2C device, use this variant 340 * instead, which can probe for device presence in a list of possible 341 * addresses. The "probe" callback function is optional. If it is provided, 342 * it must return 1 on successful probe, 0 otherwise. If it is not provided, 343 * a default probing method is used. 344 */ 345 extern struct i2c_client * 346 i2c_new_probed_device(struct i2c_adapter *adap, 347 struct i2c_board_info *info, 348 unsigned short const *addr_list, 349 int (*probe)(struct i2c_adapter *, unsigned short addr)); 350 351 /* Common custom probe functions */ 352 extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr); 353 354 /* For devices that use several addresses, use i2c_new_dummy() to make 355 * client handles for the extra addresses. 356 */ 357 extern struct i2c_client * 358 i2c_new_dummy(struct i2c_adapter *adap, u16 address); 359 360 extern struct i2c_client * 361 i2c_new_secondary_device(struct i2c_client *client, 362 const char *name, 363 u16 default_addr); 364 365 extern void i2c_unregister_device(struct i2c_client *); 366 #endif /* I2C */ 367 368 /* Mainboard arch_initcall() code should register all its I2C devices. 369 * This is done at arch_initcall time, before declaring any i2c adapters. 370 * Modules for add-on boards must use other calls. 371 */ 372 #ifdef CONFIG_I2C_BOARDINFO 373 extern int 374 i2c_register_board_info(int busnum, struct i2c_board_info const *info, 375 unsigned n); 376 #else 377 static inline int 378 i2c_register_board_info(int busnum, struct i2c_board_info const *info, 379 unsigned n) 380 { 381 return 0; 382 } 383 #endif /* I2C_BOARDINFO */ 384 385 /** 386 * struct i2c_algorithm - represent I2C transfer method 387 * @master_xfer: Issue a set of i2c transactions to the given I2C adapter 388 * defined by the msgs array, with num messages available to transfer via 389 * the adapter specified by adap. 390 * @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this 391 * is not present, then the bus layer will try and convert the SMBus calls 392 * into I2C transfers instead. 393 * @functionality: Return the flags that this algorithm/adapter pair supports 394 * from the I2C_FUNC_* flags. 395 * @reg_slave: Register given client to I2C slave mode of this adapter 396 * @unreg_slave: Unregister given client from I2C slave mode of this adapter 397 * 398 * The following structs are for those who like to implement new bus drivers: 399 * i2c_algorithm is the interface to a class of hardware solutions which can 400 * be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584 401 * to name two of the most common. 
402 * 403 * The return codes from the @master_xfer field should indicate the type of 404 * error code that occurred during the transfer, as documented in the kernel 405 * Documentation file Documentation/i2c/fault-codes. 406 */ 407 struct i2c_algorithm { 408 /* If an adapter algorithm can't do I2C-level access, set master_xfer 409 to NULL. If an adapter algorithm can do SMBus access, set 410 smbus_xfer. If set to NULL, the SMBus protocol is simulated 411 using common I2C messages */ 412 /* master_xfer should return the number of messages successfully 413 processed, or a negative value on error */ 414 int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs, 415 int num); 416 int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr, 417 unsigned short flags, char read_write, 418 u8 command, int size, union i2c_smbus_data *data); 419 420 /* To determine what the adapter supports */ 421 u32 (*functionality) (struct i2c_adapter *); 422 423 #if IS_ENABLED(CONFIG_I2C_SLAVE) 424 int (*reg_slave)(struct i2c_client *client); 425 int (*unreg_slave)(struct i2c_client *client); 426 #endif 427 }; 428 429 /** 430 * struct i2c_timings - I2C timing information 431 * @bus_freq_hz: the bus frequency in Hz 432 * @scl_rise_ns: time SCL signal takes to rise in ns; t(r) in the I2C specification 433 * @scl_fall_ns: time SCL signal takes to fall in ns; t(f) in the I2C specification 434 * @scl_int_delay_ns: time IP core additionally needs to setup SCL in ns 435 * @sda_fall_ns: time SDA signal takes to fall in ns; t(f) in the I2C specification 436 */ 437 struct i2c_timings { 438 u32 bus_freq_hz; 439 u32 scl_rise_ns; 440 u32 scl_fall_ns; 441 u32 scl_int_delay_ns; 442 u32 sda_fall_ns; 443 }; 444 445 /** 446 * struct i2c_bus_recovery_info - I2C bus recovery information 447 * @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or 448 * i2c_generic_scl_recovery() or i2c_generic_gpio_recovery(). 449 * @get_scl: This gets current value of SCL line. Mandatory for generic SCL 450 * recovery. Used internally for generic GPIO recovery. 451 * @set_scl: This sets/clears SCL line. Mandatory for generic SCL recovery. Used 452 * internally for generic GPIO recovery. 453 * @get_sda: This gets current value of SDA line. Optional for generic SCL 454 * recovery. Used internally, if sda_gpio is a valid GPIO, for generic GPIO 455 * recovery. 456 * @prepare_recovery: This will be called before starting recovery. Platform may 457 * configure padmux here for SDA/SCL line or something else they want. 458 * @unprepare_recovery: This will be called after completing recovery. Platform 459 * may configure padmux here for SDA/SCL line or something else they want. 460 * @scl_gpio: gpio number of the SCL line. Only required for GPIO recovery. 461 * @sda_gpio: gpio number of the SDA line. Only required for GPIO recovery. 
462 */ 463 struct i2c_bus_recovery_info { 464 int (*recover_bus)(struct i2c_adapter *); 465 466 int (*get_scl)(struct i2c_adapter *); 467 void (*set_scl)(struct i2c_adapter *, int val); 468 int (*get_sda)(struct i2c_adapter *); 469 470 void (*prepare_recovery)(struct i2c_adapter *); 471 void (*unprepare_recovery)(struct i2c_adapter *); 472 473 /* gpio recovery */ 474 int scl_gpio; 475 int sda_gpio; 476 }; 477 478 int i2c_recover_bus(struct i2c_adapter *adap); 479 480 /* Generic recovery routines */ 481 int i2c_generic_gpio_recovery(struct i2c_adapter *adap); 482 int i2c_generic_scl_recovery(struct i2c_adapter *adap); 483 484 /** 485 * struct i2c_adapter_quirks - describe flaws of an i2c adapter 486 * @flags: see I2C_AQ_* for possible flags and read below 487 * @max_num_msgs: maximum number of messages per transfer 488 * @max_write_len: maximum length of a write message 489 * @max_read_len: maximum length of a read message 490 * @max_comb_1st_msg_len: maximum length of the first msg in a combined message 491 * @max_comb_2nd_msg_len: maximum length of the second msg in a combined message 492 * 493 * Note about combined messages: Some I2C controllers can only send one message 494 * per transfer, plus something called combined message or write-then-read. 495 * This is (usually) a small write message followed by a read message and 496 * barely enough to access register based devices like EEPROMs. There is a flag 497 * to support this mode. It implies max_num_msg = 2 and does the length checks 498 * with max_comb_*_len because combined message mode usually has its own 499 * limitations. Because of HW implementations, some controllers can actually do 500 * write-then-anything or other variants. To support that, write-then-read has 501 * been broken out into smaller bits like write-first and read-second which can 502 * be combined as needed. 503 */ 504 505 struct i2c_adapter_quirks { 506 u64 flags; 507 int max_num_msgs; 508 u16 max_write_len; 509 u16 max_read_len; 510 u16 max_comb_1st_msg_len; 511 u16 max_comb_2nd_msg_len; 512 }; 513 514 /* enforce max_num_msgs = 2 and use max_comb_*_len for length checks */ 515 #define I2C_AQ_COMB BIT(0) 516 /* first combined message must be write */ 517 #define I2C_AQ_COMB_WRITE_FIRST BIT(1) 518 /* second combined message must be read */ 519 #define I2C_AQ_COMB_READ_SECOND BIT(2) 520 /* both combined messages must have the same target address */ 521 #define I2C_AQ_COMB_SAME_ADDR BIT(3) 522 /* convenience macro for typical write-then read case */ 523 #define I2C_AQ_COMB_WRITE_THEN_READ (I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | \ 524 I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR) 525 /* clock stretching is not supported */ 526 #define I2C_AQ_NO_CLK_STRETCH BIT(4) 527 528 /* 529 * i2c_adapter is the structure used to identify a physical i2c bus along 530 * with the access algorithms necessary to access it. 
531 */ 532 struct i2c_adapter { 533 struct module *owner; 534 unsigned int class; /* classes to allow probing for */ 535 const struct i2c_algorithm *algo; /* the algorithm to access the bus */ 536 void *algo_data; 537 538 /* data fields that are valid for all devices */ 539 struct rt_mutex bus_lock; 540 struct rt_mutex mux_lock; 541 542 int timeout; /* in jiffies */ 543 int retries; 544 struct device dev; /* the adapter device */ 545 546 int nr; 547 char name[48]; 548 struct completion dev_released; 549 550 struct mutex userspace_clients_lock; 551 struct list_head userspace_clients; 552 553 struct i2c_bus_recovery_info *bus_recovery_info; 554 const struct i2c_adapter_quirks *quirks; 555 556 void (*lock_bus)(struct i2c_adapter *, unsigned int flags); 557 int (*trylock_bus)(struct i2c_adapter *, unsigned int flags); 558 void (*unlock_bus)(struct i2c_adapter *, unsigned int flags); 559 }; 560 #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) 561 562 static inline void *i2c_get_adapdata(const struct i2c_adapter *dev) 563 { 564 return dev_get_drvdata(&dev->dev); 565 } 566 567 static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data) 568 { 569 dev_set_drvdata(&dev->dev, data); 570 } 571 572 static inline struct i2c_adapter * 573 i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) 574 { 575 #if IS_ENABLED(CONFIG_I2C_MUX) 576 struct device *parent = adapter->dev.parent; 577 578 if (parent != NULL && parent->type == &i2c_adapter_type) 579 return to_i2c_adapter(parent); 580 else 581 #endif 582 return NULL; 583 } 584 585 int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *)); 586 587 /* Adapter locking functions, exported for shared pin cases */ 588 #define I2C_LOCK_ROOT_ADAPTER BIT(0) 589 #define I2C_LOCK_SEGMENT BIT(1) 590 591 /** 592 * i2c_lock_bus - Get exclusive access to an I2C bus segment 593 * @adapter: Target I2C bus segment 594 * @flags: I2C_LOCK_ROOT_ADAPTER locks the root i2c adapter, I2C_LOCK_SEGMENT 595 * locks only this branch in the adapter tree 596 */ 597 static inline void 598 i2c_lock_bus(struct i2c_adapter *adapter, unsigned int flags) 599 { 600 adapter->lock_bus(adapter, flags); 601 } 602 603 /** 604 * i2c_unlock_bus - Release exclusive access to an I2C bus segment 605 * @adapter: Target I2C bus segment 606 * @flags: I2C_LOCK_ROOT_ADAPTER unlocks the root i2c adapter, I2C_LOCK_SEGMENT 607 * unlocks only this branch in the adapter tree 608 */ 609 static inline void 610 i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) 611 { 612 adapter->unlock_bus(adapter, flags); 613 } 614 615 static inline void 616 i2c_lock_adapter(struct i2c_adapter *adapter) 617 { 618 i2c_lock_bus(adapter, I2C_LOCK_ROOT_ADAPTER); 619 } 620 621 static inline void 622 i2c_unlock_adapter(struct i2c_adapter *adapter) 623 { 624 i2c_unlock_bus(adapter, I2C_LOCK_ROOT_ADAPTER); 625 } 626 627 /*flags for the client struct: */ 628 #define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ 629 #define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ 630 /* Must equal I2C_M_TEN below */ 631 #define I2C_CLIENT_SLAVE 0x20 /* we are the slave */ 632 #define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ 633 #define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ 634 /* Must match I2C_M_STOP|IGNORE_NAK */ 635 636 /* i2c adapter classes (bitmask) */ 637 #define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... 
*/ 638 #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ 639 #define I2C_CLASS_SPD (1<<7) /* Memory modules */ 640 #define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */ 641 642 /* Internal numbers to terminate lists */ 643 #define I2C_CLIENT_END 0xfffeU 644 645 /* Construct an I2C_CLIENT_END-terminated array of i2c addresses */ 646 #define I2C_ADDRS(addr, addrs...) \ 647 ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END }) 648 649 650 /* ----- functions exported by i2c.o */ 651 652 /* administration... 653 */ 654 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 655 extern int i2c_add_adapter(struct i2c_adapter *); 656 extern void i2c_del_adapter(struct i2c_adapter *); 657 extern int i2c_add_numbered_adapter(struct i2c_adapter *); 658 659 extern int i2c_register_driver(struct module *, struct i2c_driver *); 660 extern void i2c_del_driver(struct i2c_driver *); 661 662 /* use a define to avoid include chaining to get THIS_MODULE */ 663 #define i2c_add_driver(driver) \ 664 i2c_register_driver(THIS_MODULE, driver) 665 666 extern struct i2c_client *i2c_use_client(struct i2c_client *client); 667 extern void i2c_release_client(struct i2c_client *client); 668 669 /* call the i2c_client->command() of all attached clients with 670 * the given arguments */ 671 extern void i2c_clients_command(struct i2c_adapter *adap, 672 unsigned int cmd, void *arg); 673 674 extern struct i2c_adapter *i2c_get_adapter(int nr); 675 extern void i2c_put_adapter(struct i2c_adapter *adap); 676 677 void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults); 678 679 /* Return the functionality mask */ 680 static inline u32 i2c_get_functionality(struct i2c_adapter *adap) 681 { 682 return adap->algo->functionality(adap); 683 } 684 685 /* Return 1 if adapter supports everything we need, 0 if not. */ 686 static inline int i2c_check_functionality(struct i2c_adapter *adap, u32 func) 687 { 688 return (func & i2c_get_functionality(adap)) == func; 689 } 690 691 /** 692 * i2c_check_quirks() - Function for checking the quirk flags in an i2c adapter 693 * @adap: i2c adapter 694 * @quirks: quirk flags 695 * 696 * Return: true if the adapter has all the specified quirk flags, false if not 697 */ 698 static inline bool i2c_check_quirks(struct i2c_adapter *adap, u64 quirks) 699 { 700 if (!adap->quirks) 701 return false; 702 return (adap->quirks->flags & quirks) == quirks; 703 } 704 705 /* Return the adapter number for a specific adapter */ 706 static inline int i2c_adapter_id(struct i2c_adapter *adap) 707 { 708 return adap->nr; 709 } 710 711 static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) 712 { 713 return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0); 714 } 715 716 /** 717 * module_i2c_driver() - Helper macro for registering a modular I2C driver 718 * @__i2c_driver: i2c_driver struct 719 * 720 * Helper macro for I2C drivers which do not do anything special in module 721 * init/exit. This eliminates a lot of boilerplate. Each module may only 722 * use this macro once, and calling it replaces module_init() and module_exit() 723 */ 724 #define module_i2c_driver(__i2c_driver) \ 725 module_driver(__i2c_driver, i2c_add_driver, \ 726 i2c_del_driver) 727 728 /** 729 * builtin_i2c_driver() - Helper macro for registering a builtin I2C driver 730 * @__i2c_driver: i2c_driver struct 731 * 732 * Helper macro for I2C drivers which do not do anything special in their 733 * init. This eliminates a lot of boilerplate. 
Each driver may only 734 * use this macro once, and calling it replaces device_initcall(). 735 */ 736 #define builtin_i2c_driver(__i2c_driver) \ 737 builtin_driver(__i2c_driver, i2c_add_driver) 738 739 #endif /* I2C */ 740 741 #if IS_ENABLED(CONFIG_OF) 742 /* must call put_device() when done with returned i2c_client device */ 743 extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node); 744 745 /* must call put_device() when done with returned i2c_adapter device */ 746 extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node); 747 748 /* must call i2c_put_adapter() when done with returned i2c_adapter device */ 749 struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node); 750 751 #else 752 753 static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) 754 { 755 return NULL; 756 } 757 758 static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) 759 { 760 return NULL; 761 } 762 763 static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node) 764 { 765 return NULL; 766 } 767 #endif /* CONFIG_OF */ 768 769 #endif /* _LINUX_I2C_H */
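As a usage sketch for the SMBus helpers declared above, the fragment below performs a read-modify-write of a single 8-bit register. The example_update_reg() name, the reg/set_mask parameters and the chosen error codes are assumptions for illustration; the i2c_smbus_*() calls and i2c_check_functionality() come from this header, and I2C_FUNC_SMBUS_BYTE_DATA from <uapi/linux/i2c.h>.

#include <linux/i2c.h>
#include <linux/errno.h>

/* Read-modify-write one 8-bit register over SMBus. The helpers return a
 * negative errno on failure, or the value read (>= 0) on success.
 */
static int example_update_reg(struct i2c_client *client, u8 reg, u8 set_mask)
{
	s32 val;

	/* Bail out early if the adapter cannot do SMBus byte-data transfers */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -EOPNOTSUPP;

	val = i2c_smbus_read_byte_data(client, reg);
	if (val < 0)
		return val;

	return i2c_smbus_write_byte_data(client, reg, (u8)val | set_mask);
}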
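A companion sketch of the registration path, tying struct i2c_driver and module_i2c_driver() together. The "exampledev" chip name and the stub probe/remove bodies are hypothetical; a real driver would allocate its state in probe and attach it with i2c_set_clientdata().

#include <linux/module.h>
#include <linux/i2c.h>

static int exampledev_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
{
	dev_info(&client->dev, "bound to %s\n", id->name);
	return 0;
}

static int exampledev_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id exampledev_id[] = {
	{ "exampledev", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, exampledev_id);

static struct i2c_driver exampledev_driver = {
	.driver = {
		.name	= "exampledev",
	},
	.probe		= exampledev_probe,
	.remove		= exampledev_remove,
	.id_table	= exampledev_id,
};

/* Expands to module_init()/module_exit() calling i2c_add_driver()/i2c_del_driver() */
module_i2c_driver(exampledev_driver);

MODULE_LICENSE("GPL");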
1 /* interrupt.h */ 2 #ifndef _LINUX_INTERRUPT_H 3 #define _LINUX_INTERRUPT_H 4 5 #include <linux/kernel.h> 6 #include <linux/linkage.h> 7 #include <linux/bitops.h> 8 #include <linux/preempt.h> 9 #include <linux/cpumask.h> 10 #include <linux/irqreturn.h> 11 #include <linux/irqnr.h> 12 #include <linux/hardirq.h> 13 #include <linux/irqflags.h> 14 #include <linux/hrtimer.h> 15 #include <linux/kref.h> 16 #include <linux/workqueue.h> 17 18 #include <linux/atomic.h> 19 #include <asm/ptrace.h> 20 #include <asm/irq.h> 21 22 /* 23 * These correspond to the IORESOURCE_IRQ_* defines in 24 * linux/ioport.h to select the interrupt line behaviour. When 25 * requesting an interrupt without specifying a IRQF_TRIGGER, the 26 * setting should be assumed to be "as already configured", which 27 * may be as per machine or firmware initialisation. 28 */ 29 #define IRQF_TRIGGER_NONE 0x00000000 30 #define IRQF_TRIGGER_RISING 0x00000001 31 #define IRQF_TRIGGER_FALLING 0x00000002 32 #define IRQF_TRIGGER_HIGH 0x00000004 33 #define IRQF_TRIGGER_LOW 0x00000008 34 #define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \ 35 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING) 36 #define IRQF_TRIGGER_PROBE 0x00000010 37 38 /* 39 * These flags used only by the kernel as part of the 40 * irq handling routines. 41 * 42 * IRQF_SHARED - allow sharing the irq among several devices 43 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur 44 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt 45 * IRQF_PERCPU - Interrupt is per cpu 46 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing 47 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is 48 * registered first in an shared interrupt is considered for 49 * performance reasons) 50 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. 51 * Used by threaded interrupts which need to keep the 52 * irq line disabled until the threaded handler has been run. 53 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee 54 * that this interrupt will wake the system from a suspended 55 * state. See Documentation/power/suspend-and-interrupts.txt 56 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set 57 * IRQF_NO_THREAD - Interrupt cannot be threaded 58 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device 59 * resume time. 60 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this 61 * interrupt handler after suspending interrupts. For system 62 * wakeup devices users need to implement wakeup detection in 63 * their interrupt handlers. 64 */ 65 #define IRQF_SHARED 0x00000080 66 #define IRQF_PROBE_SHARED 0x00000100 67 #define __IRQF_TIMER 0x00000200 68 #define IRQF_PERCPU 0x00000400 69 #define IRQF_NOBALANCING 0x00000800 70 #define IRQF_IRQPOLL 0x00001000 71 #define IRQF_ONESHOT 0x00002000 72 #define IRQF_NO_SUSPEND 0x00004000 73 #define IRQF_FORCE_RESUME 0x00008000 74 #define IRQF_NO_THREAD 0x00010000 75 #define IRQF_EARLY_RESUME 0x00020000 76 #define IRQF_COND_SUSPEND 0x00040000 77 78 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) 79 80 /* 81 * These values can be returned by request_any_context_irq() and 82 * describe the context the interrupt will be run in. 
83 * 84 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context 85 * IRQC_IS_NESTED - interrupt runs in a nested threaded context 86 */ 87 enum { 88 IRQC_IS_HARDIRQ = 0, 89 IRQC_IS_NESTED, 90 }; 91 92 typedef irqreturn_t (*irq_handler_t)(int, void *); 93 94 /** 95 * struct irqaction - per interrupt action descriptor 96 * @handler: interrupt handler function 97 * @name: name of the device 98 * @dev_id: cookie to identify the device 99 * @percpu_dev_id: cookie to identify the device 100 * @next: pointer to the next irqaction for shared interrupts 101 * @irq: interrupt number 102 * @flags: flags (see IRQF_* above) 103 * @thread_fn: interrupt handler function for threaded interrupts 104 * @thread: thread pointer for threaded interrupts 105 * @secondary: pointer to secondary irqaction (force threading) 106 * @thread_flags: flags related to @thread 107 * @thread_mask: bitmask for keeping track of @thread activity 108 * @dir: pointer to the proc/irq/NN/name entry 109 */ 110 struct irqaction { 111 irq_handler_t handler; 112 void *dev_id; 113 void __percpu *percpu_dev_id; 114 struct irqaction *next; 115 irq_handler_t thread_fn; 116 struct task_struct *thread; 117 struct irqaction *secondary; 118 unsigned int irq; 119 unsigned int flags; 120 unsigned long thread_flags; 121 unsigned long thread_mask; 122 const char *name; 123 struct proc_dir_entry *dir; 124 } ____cacheline_internodealigned_in_smp; 125 126 extern irqreturn_t no_action(int cpl, void *dev_id); 127 128 /* 129 * If a (PCI) device interrupt is not connected we set dev->irq to 130 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we 131 * can distingiush that case from other error returns. 132 * 133 * 0x80000000 is guaranteed to be outside the available range of interrupts 134 * and easy to distinguish from other possible incorrect values. 
135 */ 136 #define IRQ_NOTCONNECTED (1U << 31) 137 138 extern int __must_check 139 request_threaded_irq(unsigned int irq, irq_handler_t handler, 140 irq_handler_t thread_fn, 141 unsigned long flags, const char *name, void *dev); 142 143 static inline int __must_check 144 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, 145 const char *name, void *dev) 146 { 147 return request_threaded_irq(irq, handler, NULL, flags, name, dev); 148 } 149 150 extern int __must_check 151 request_any_context_irq(unsigned int irq, irq_handler_t handler, 152 unsigned long flags, const char *name, void *dev_id); 153 154 extern int __must_check 155 request_percpu_irq(unsigned int irq, irq_handler_t handler, 156 const char *devname, void __percpu *percpu_dev_id); 157 158 extern void free_irq(unsigned int, void *); 159 extern void free_percpu_irq(unsigned int, void __percpu *); 160 161 struct device; 162 163 extern int __must_check 164 devm_request_threaded_irq(struct device *dev, unsigned int irq, 165 irq_handler_t handler, irq_handler_t thread_fn, 166 unsigned long irqflags, const char *devname, 167 void *dev_id); 168 169 static inline int __must_check 170 devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, 171 unsigned long irqflags, const char *devname, void *dev_id) 172 { 173 return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags, 174 devname, dev_id); 175 } 176 177 extern int __must_check 178 devm_request_any_context_irq(struct device *dev, unsigned int irq, 179 irq_handler_t handler, unsigned long irqflags, 180 const char *devname, void *dev_id); 181 182 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); 183 184 /* 185 * On lockdep we dont want to enable hardirqs in hardirq 186 * context. Use local_irq_enable_in_hardirq() to annotate 187 * kernel code that has to do this nevertheless (pretty much 188 * the only valid case is for old/broken hardware that is 189 * insanely slow). 190 * 191 * NOTE: in theory this might break fragile code that relies 192 * on hardirq delivery - in practice we dont seem to have such 193 * places left. So the only effect should be slightly increased 194 * irqs-off latencies. 195 */ 196 #ifdef CONFIG_LOCKDEP 197 # define local_irq_enable_in_hardirq() do { } while (0) 198 #else 199 # define local_irq_enable_in_hardirq() local_irq_enable() 200 #endif 201 202 extern void disable_irq_nosync(unsigned int irq); 203 extern bool disable_hardirq(unsigned int irq); 204 extern void disable_irq(unsigned int irq); 205 extern void disable_percpu_irq(unsigned int irq); 206 extern void enable_irq(unsigned int irq); 207 extern void enable_percpu_irq(unsigned int irq, unsigned int type); 208 extern bool irq_percpu_is_enabled(unsigned int irq); 209 extern void irq_wake_thread(unsigned int irq, void *dev_id); 210 211 /* The following three functions are for the core kernel use only. */ 212 extern void suspend_device_irqs(void); 213 extern void resume_device_irqs(void); 214 215 /** 216 * struct irq_affinity_notify - context for notification of IRQ affinity changes 217 * @irq: Interrupt to which notification applies 218 * @kref: Reference count, for internal use 219 * @work: Work item, for internal use 220 * @notify: Function to be called on change. This will be 221 * called in process context. 222 * @release: Function to be called on release. This will be 223 * called in process context. Once registered, the 224 * structure must only be freed when this function is 225 * called or later. 
226 */ 227 struct irq_affinity_notify { 228 unsigned int irq; 229 struct kref kref; 230 struct work_struct work; 231 void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); 232 void (*release)(struct kref *ref); 233 }; 234 235 #if defined(CONFIG_SMP) 236 237 extern cpumask_var_t irq_default_affinity; 238 239 /* Internal implementation. Use the helpers below */ 240 extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask, 241 bool force); 242 243 /** 244 * irq_set_affinity - Set the irq affinity of a given irq 245 * @irq: Interrupt to set affinity 246 * @cpumask: cpumask 247 * 248 * Fails if cpumask does not contain an online CPU 249 */ 250 static inline int 251 irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) 252 { 253 return __irq_set_affinity(irq, cpumask, false); 254 } 255 256 /** 257 * irq_force_affinity - Force the irq affinity of a given irq 258 * @irq: Interrupt to set affinity 259 * @cpumask: cpumask 260 * 261 * Same as irq_set_affinity, but without checking the mask against 262 * online cpus. 263 * 264 * Solely for low level cpu hotplug code, where we need to make per 265 * cpu interrupts affine before the cpu becomes online. 266 */ 267 static inline int 268 irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) 269 { 270 return __irq_set_affinity(irq, cpumask, true); 271 } 272 273 extern int irq_can_set_affinity(unsigned int irq); 274 extern int irq_select_affinity(unsigned int irq); 275 276 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); 277 278 extern int 279 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); 280 281 struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs); 282 283 #else /* CONFIG_SMP */ 284 285 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) 286 { 287 return -EINVAL; 288 } 289 290 static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) 291 { 292 return 0; 293 } 294 295 static inline int irq_can_set_affinity(unsigned int irq) 296 { 297 return 0; 298 } 299 300 static inline int irq_select_affinity(unsigned int irq) { return 0; } 301 302 static inline int irq_set_affinity_hint(unsigned int irq, 303 const struct cpumask *m) 304 { 305 return -EINVAL; 306 } 307 308 static inline int 309 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) 310 { 311 return 0; 312 } 313 314 static inline struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs) 315 { 316 *nr_vecs = 1; 317 return NULL; 318 } 319 #endif /* CONFIG_SMP */ 320 321 /* 322 * Special lockdep variants of irq disabling/enabling. 323 * These should be used for locking constructs that 324 * know that a particular irq context which is disabled, 325 * and which is the only irq-context user of a lock, 326 * that it's safe to take the lock in the irq-disabled 327 * section without disabling hardirqs. 328 * 329 * On !CONFIG_LOCKDEP they are equivalent to the normal 330 * irq disable/enable methods. 
331 */ 332 static inline void disable_irq_nosync_lockdep(unsigned int irq) 333 { 334 disable_irq_nosync(irq); 335 #ifdef CONFIG_LOCKDEP 336 local_irq_disable(); 337 #endif 338 } 339 340 static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) 341 { 342 disable_irq_nosync(irq); 343 #ifdef CONFIG_LOCKDEP 344 local_irq_save(*flags); 345 #endif 346 } 347 348 static inline void disable_irq_lockdep(unsigned int irq) 349 { 350 disable_irq(irq); 351 #ifdef CONFIG_LOCKDEP 352 local_irq_disable(); 353 #endif 354 } 355 356 static inline void enable_irq_lockdep(unsigned int irq) 357 { 358 #ifdef CONFIG_LOCKDEP 359 local_irq_enable(); 360 #endif 361 enable_irq(irq); 362 } 363 364 static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) 365 { 366 #ifdef CONFIG_LOCKDEP 367 local_irq_restore(*flags); 368 #endif 369 enable_irq(irq); 370 } 371 372 /* IRQ wakeup (PM) control: */ 373 extern int irq_set_irq_wake(unsigned int irq, unsigned int on); 374 375 static inline int enable_irq_wake(unsigned int irq) 376 { 377 return irq_set_irq_wake(irq, 1); 378 } 379 380 static inline int disable_irq_wake(unsigned int irq) 381 { 382 return irq_set_irq_wake(irq, 0); 383 } 384 385 /* 386 * irq_get_irqchip_state/irq_set_irqchip_state specific flags 387 */ 388 enum irqchip_irq_state { 389 IRQCHIP_STATE_PENDING, /* Is interrupt pending? */ 390 IRQCHIP_STATE_ACTIVE, /* Is interrupt in progress? */ 391 IRQCHIP_STATE_MASKED, /* Is interrupt masked? */ 392 IRQCHIP_STATE_LINE_LEVEL, /* Is IRQ line high? */ 393 }; 394 395 extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, 396 bool *state); 397 extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, 398 bool state); 399 400 #ifdef CONFIG_IRQ_FORCED_THREADING 401 extern bool force_irqthreads; 402 #else 403 #define force_irqthreads (0) 404 #endif 405 406 #ifndef __ARCH_SET_SOFTIRQ_PENDING 407 #define set_softirq_pending(x) (local_softirq_pending() = (x)) 408 #define or_softirq_pending(x) (local_softirq_pending() |= (x)) 409 #endif 410 411 /* Some architectures might implement lazy enabling/disabling of 412 * interrupts. In some cases, such as stop_machine, we might want 413 * to ensure that after a local_irq_disable(), interrupts have 414 * really been disabled in hardware. Such architectures need to 415 * implement the following hook. 416 */ 417 #ifndef hard_irq_disable 418 #define hard_irq_disable() do { } while(0) 419 #endif 420 421 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high 422 frequency threaded job scheduling. For almost all the purposes 423 tasklets are more than enough. F.e. all serial device BHs et 424 al. should be converted to tasklets, not to softirqs. 425 */ 426 427 enum 428 { 429 HI_SOFTIRQ=0, 430 TIMER_SOFTIRQ, 431 NET_TX_SOFTIRQ, 432 NET_RX_SOFTIRQ, 433 BLOCK_SOFTIRQ, 434 IRQ_POLL_SOFTIRQ, 435 TASKLET_SOFTIRQ, 436 SCHED_SOFTIRQ, 437 HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the 438 numbering. Sigh! */ 439 RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ 440 441 NR_SOFTIRQS 442 }; 443 444 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ)) 445 446 /* map softirq index to softirq name. update 'softirq_to_name' in 447 * kernel/softirq.c when adding a new softirq. 448 */ 449 extern const char * const softirq_to_name[NR_SOFTIRQS]; 450 451 /* softirq mask and active fields moved to irq_cpustat_t in 452 * asm/hardirq.h to get better cache usage. 
KAO 453 */ 454 455 struct softirq_action 456 { 457 void (*action)(struct softirq_action *); 458 }; 459 460 asmlinkage void do_softirq(void); 461 asmlinkage void __do_softirq(void); 462 463 #ifdef __ARCH_HAS_DO_SOFTIRQ 464 void do_softirq_own_stack(void); 465 #else 466 static inline void do_softirq_own_stack(void) 467 { 468 __do_softirq(); 469 } 470 #endif 471 472 extern void open_softirq(int nr, void (*action)(struct softirq_action *)); 473 extern void softirq_init(void); 474 extern void __raise_softirq_irqoff(unsigned int nr); 475 476 extern void raise_softirq_irqoff(unsigned int nr); 477 extern void raise_softirq(unsigned int nr); 478 479 DECLARE_PER_CPU(struct task_struct *, ksoftirqd); 480 481 static inline struct task_struct *this_cpu_ksoftirqd(void) 482 { 483 return this_cpu_read(ksoftirqd); 484 } 485 486 /* Tasklets --- multithreaded analogue of BHs. 487 488 Main feature differing them of generic softirqs: tasklet 489 is running only on one CPU simultaneously. 490 491 Main feature differing them of BHs: different tasklets 492 may be run simultaneously on different CPUs. 493 494 Properties: 495 * If tasklet_schedule() is called, then tasklet is guaranteed 496 to be executed on some cpu at least once after this. 497 * If the tasklet is already scheduled, but its execution is still not 498 started, it will be executed only once. 499 * If this tasklet is already running on another CPU (or schedule is called 500 from tasklet itself), it is rescheduled for later. 501 * Tasklet is strictly serialized wrt itself, but not 502 wrt another tasklets. If client needs some intertask synchronization, 503 he makes it with spinlocks. 504 */ 505 506 struct tasklet_struct 507 { 508 struct tasklet_struct *next; 509 unsigned long state; 510 atomic_t count; 511 void (*func)(unsigned long); 512 unsigned long data; 513 }; 514 515 #define DECLARE_TASKLET(name, func, data) \ 516 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } 517 518 #define DECLARE_TASKLET_DISABLED(name, func, data) \ 519 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } 520 521 522 enum 523 { 524 TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ 525 TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ 526 }; 527 528 #ifdef CONFIG_SMP 529 static inline int tasklet_trylock(struct tasklet_struct *t) 530 { 531 return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); 532 } 533 534 static inline void tasklet_unlock(struct tasklet_struct *t) 535 { 536 smp_mb__before_atomic(); 537 clear_bit(TASKLET_STATE_RUN, &(t)->state); 538 } 539 540 static inline void tasklet_unlock_wait(struct tasklet_struct *t) 541 { 542 while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } 543 } 544 #else 545 #define tasklet_trylock(t) 1 546 #define tasklet_unlock_wait(t) do { } while (0) 547 #define tasklet_unlock(t) do { } while (0) 548 #endif 549 550 extern void __tasklet_schedule(struct tasklet_struct *t); 551 552 static inline void tasklet_schedule(struct tasklet_struct *t) 553 { 554 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) 555 __tasklet_schedule(t); 556 } 557 558 extern void __tasklet_hi_schedule(struct tasklet_struct *t); 559 560 static inline void tasklet_hi_schedule(struct tasklet_struct *t) 561 { 562 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) 563 __tasklet_hi_schedule(t); 564 } 565 566 extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); 567 568 /* 569 * This version avoids touching any other tasklets. 
Needed for kmemcheck 570 * in order not to take any page faults while enqueueing this tasklet; 571 * consider VERY carefully whether you really need this or 572 * tasklet_hi_schedule()... 573 */ 574 static inline void tasklet_hi_schedule_first(struct tasklet_struct *t) 575 { 576 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) 577 __tasklet_hi_schedule_first(t); 578 } 579 580 581 static inline void tasklet_disable_nosync(struct tasklet_struct *t) 582 { 583 atomic_inc(&t->count); 584 smp_mb__after_atomic(); 585 } 586 587 static inline void tasklet_disable(struct tasklet_struct *t) 588 { 589 tasklet_disable_nosync(t); 590 tasklet_unlock_wait(t); 591 smp_mb(); 592 } 593 594 static inline void tasklet_enable(struct tasklet_struct *t) 595 { 596 smp_mb__before_atomic(); 597 atomic_dec(&t->count); 598 } 599 600 extern void tasklet_kill(struct tasklet_struct *t); 601 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); 602 extern void tasklet_init(struct tasklet_struct *t, 603 void (*func)(unsigned long), unsigned long data); 604 605 struct tasklet_hrtimer { 606 struct hrtimer timer; 607 struct tasklet_struct tasklet; 608 enum hrtimer_restart (*function)(struct hrtimer *); 609 }; 610 611 extern void 612 tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, 613 enum hrtimer_restart (*function)(struct hrtimer *), 614 clockid_t which_clock, enum hrtimer_mode mode); 615 616 static inline 617 void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, 618 const enum hrtimer_mode mode) 619 { 620 hrtimer_start(&ttimer->timer, time, mode); 621 } 622 623 static inline 624 void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) 625 { 626 hrtimer_cancel(&ttimer->timer); 627 tasklet_kill(&ttimer->tasklet); 628 } 629 630 /* 631 * Autoprobing for irqs: 632 * 633 * probe_irq_on() and probe_irq_off() provide robust primitives 634 * for accurate IRQ probing during kernel initialization. They are 635 * reasonably simple to use, are not "fooled" by spurious interrupts, 636 * and, unlike other attempts at IRQ probing, they do not get hung on 637 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards). 638 * 639 * For reasonably foolproof probing, use them as follows: 640 * 641 * 1. clear and/or mask the device's internal interrupt. 642 * 2. sti(); 643 * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs 644 * 4. enable the device and cause it to trigger an interrupt. 645 * 5. wait for the device to interrupt, using non-intrusive polling or a delay. 646 * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple 647 * 7. service the device to clear its pending interrupt. 648 * 8. loop again if paranoia is required. 649 * 650 * probe_irq_on() returns a mask of allocated irq's. 651 * 652 * probe_irq_off() takes the mask as a parameter, 653 * and returns the irq number which occurred, 654 * or zero if none occurred, or a negative irq number 655 * if more than one irq occurred. 
656 */ 657 658 #if !defined(CONFIG_GENERIC_IRQ_PROBE) 659 static inline unsigned long probe_irq_on(void) 660 { 661 return 0; 662 } 663 static inline int probe_irq_off(unsigned long val) 664 { 665 return 0; 666 } 667 static inline unsigned int probe_irq_mask(unsigned long val) 668 { 669 return 0; 670 } 671 #else 672 extern unsigned long probe_irq_on(void); /* returns 0 on failure */ 673 extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */ 674 extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */ 675 #endif 676 677 #ifdef CONFIG_PROC_FS 678 /* Initialize /proc/irq/ */ 679 extern void init_irq_proc(void); 680 #else 681 static inline void init_irq_proc(void) 682 { 683 } 684 #endif 685 686 struct seq_file; 687 int show_interrupts(struct seq_file *p, void *v); 688 int arch_show_interrupts(struct seq_file *p, int prec); 689 690 extern int early_irq_init(void); 691 extern int arch_probe_nr_irqs(void); 692 extern int arch_early_irq_init(void); 693 694 #if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) 695 /* 696 * We want to know which function is an entrypoint of a hardirq or a softirq. 697 */ 698 #define __irq_entry __attribute__((__section__(".irqentry.text"))) 699 #define __softirq_entry \ 700 __attribute__((__section__(".softirqentry.text"))) 701 702 /* Limits of hardirq entrypoints */ 703 extern char __irqentry_text_start[]; 704 extern char __irqentry_text_end[]; 705 /* Limits of softirq entrypoints */ 706 extern char __softirqentry_text_start[]; 707 extern char __softirqentry_text_end[]; 708 709 #else 710 #define __irq_entry 711 #define __softirq_entry 712 #endif 713 714 #endif
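To illustrate the request_*_irq() interfaces above, here is a sketch of the split hardirq/threaded-handler pattern using the device-managed variant. struct example_dev, its fields and the handler bodies are assumptions made up for the example.

#include <linux/interrupt.h>
#include <linux/device.h>

struct example_dev {
	struct device *dev;
	int irq;
};

static irqreturn_t example_hardirq(int irq, void *dev_id)
{
	/* Keep hard interrupt context short; a real handler would first check
	 * whether its device actually raised the interrupt. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	struct example_dev *edev = dev_id;

	/* Heavier work runs here, in process context */
	dev_dbg(edev->dev, "handling IRQ %d in the threaded handler\n", irq);
	return IRQ_HANDLED;
}

static int example_setup_irq(struct example_dev *edev)
{
	/* Device-managed: the IRQ is freed automatically on driver unbind.
	 * IRQF_ONESHOT keeps the line masked until the thread has finished. */
	return devm_request_threaded_irq(edev->dev, edev->irq,
					 example_hardirq, example_thread_fn,
					 IRQF_ONESHOT, "example", edev);
}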
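A corresponding sketch for the tasklet interface, deferring work from a hard interrupt handler to softirq context; the example_* names and the plain event counter are again illustrative assumptions.

#include <linux/interrupt.h>

static unsigned long example_events;

static void example_bh(unsigned long data)
{
	/* Runs in softirq context; a given tasklet never runs on two CPUs
	 * at the same time, so these increments do not race each other. */
	example_events++;
}

static DECLARE_TASKLET(example_tasklet, example_bh, 0);

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* Note the event and defer the heavier processing to the tasklet */
	tasklet_schedule(&example_tasklet);
	return IRQ_HANDLED;
}

/* On teardown, wait for any scheduled or running instance to finish */
static void example_stop(void)
{
	tasklet_kill(&example_tasklet);
}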
1 #ifndef _LINUX_JIFFIES_H 2 #define _LINUX_JIFFIES_H 3 4 #include <linux/math64.h> 5 #include <linux/kernel.h> 6 #include <linux/types.h> 7 #include <linux/time.h> 8 #include <linux/timex.h> 9 #include <asm/param.h> /* for HZ */ 10 #include <generated/timeconst.h> 11 12 /* 13 * The following defines establish the engineering parameters of the PLL 14 * model. The HZ variable establishes the timer interrupt frequency, 100 Hz 15 * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the 16 * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the 17 * nearest power of two in order to avoid hardware multiply operations. 18 */ 19 #if HZ >= 12 && HZ < 24 20 # define SHIFT_HZ 4 21 #elif HZ >= 24 && HZ < 48 22 # define SHIFT_HZ 5 23 #elif HZ >= 48 && HZ < 96 24 # define SHIFT_HZ 6 25 #elif HZ >= 96 && HZ < 192 26 # define SHIFT_HZ 7 27 #elif HZ >= 192 && HZ < 384 28 # define SHIFT_HZ 8 29 #elif HZ >= 384 && HZ < 768 30 # define SHIFT_HZ 9 31 #elif HZ >= 768 && HZ < 1536 32 # define SHIFT_HZ 10 33 #elif HZ >= 1536 && HZ < 3072 34 # define SHIFT_HZ 11 35 #elif HZ >= 3072 && HZ < 6144 36 # define SHIFT_HZ 12 37 #elif HZ >= 6144 && HZ < 12288 38 # define SHIFT_HZ 13 39 #else 40 # error Invalid value of HZ. 41 #endif 42 43 /* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can 44 * improve accuracy by shifting LSH bits, hence calculating: 45 * (NOM << LSH) / DEN 46 * This however means trouble for large NOM, because (NOM << LSH) may no 47 * longer fit in 32 bits. The following way of calculating this gives us 48 * some slack, under the following conditions: 49 * - (NOM / DEN) fits in (32 - LSH) bits. 50 * - (NOM % DEN) fits in (32 - LSH) bits. 51 */ 52 #define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \ 53 + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN)) 54 55 /* LATCH is used in the interval timer and ftape setup. */ 56 #define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */ 57 58 extern int register_refined_jiffies(long clock_tick_rate); 59 60 /* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ 61 #define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ) 62 63 /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ 64 #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) 65 66 /* some arch's have a small-data section that can be accessed register-relative 67 * but that can only take up to, say, 4-byte variables. jiffies being part of 68 * an 8-byte variable may not be correctly accessed unless we force the issue 69 */ 70 #define __jiffy_data __attribute__((section(".data"))) 71 72 /* 73 * The 64-bit value is not atomic - you MUST NOT read it 74 * without sampling the sequence number in jiffies_lock. 75 * get_jiffies_64() will do this for you as appropriate. 76 */ 77 extern u64 __jiffy_data jiffies_64; 78 extern unsigned long volatile __jiffy_data jiffies; 79 80 #if (BITS_PER_LONG < 64) 81 u64 get_jiffies_64(void); 82 #else 83 static inline u64 get_jiffies_64(void) 84 { 85 return (u64)jiffies; 86 } 87 #endif 88 89 /* 90 * These inlines deal with timer wrapping correctly. You are 91 * strongly encouraged to use them 92 * 1. Because people otherwise forget 93 * 2. Because if the timer wrap changes in future you won't have to 94 * alter your driver code. 95 * 96 * time_after(a,b) returns true if the time a is after time b. 97 * 98 * Do this with "<0" and ">=0" to only test the sign of the result. A 99 * good compiler would generate better code (and a really good compiler 100 * wouldn't care). Gcc is currently neither. 
101 */ 102 #define time_after(a,b) \ 103 (typecheck(unsigned long, a) && \ 104 typecheck(unsigned long, b) && \ 105 ((long)((b) - (a)) < 0)) 106 #define time_before(a,b) time_after(b,a) 107 108 #define time_after_eq(a,b) \ 109 (typecheck(unsigned long, a) && \ 110 typecheck(unsigned long, b) && \ 111 ((long)((a) - (b)) >= 0)) 112 #define time_before_eq(a,b) time_after_eq(b,a) 113 114 /* 115 * Calculate whether a is in the range of [b, c]. 116 */ 117 #define time_in_range(a,b,c) \ 118 (time_after_eq(a,b) && \ 119 time_before_eq(a,c)) 120 121 /* 122 * Calculate whether a is in the range of [b, c). 123 */ 124 #define time_in_range_open(a,b,c) \ 125 (time_after_eq(a,b) && \ 126 time_before(a,c)) 127 128 /* Same as above, but does so with platform independent 64bit types. 129 * These must be used when utilizing jiffies_64 (i.e. return value of 130 * get_jiffies_64() */ 131 #define time_after64(a,b) \ 132 (typecheck(__u64, a) && \ 133 typecheck(__u64, b) && \ 134 ((__s64)((b) - (a)) < 0)) 135 #define time_before64(a,b) time_after64(b,a) 136 137 #define time_after_eq64(a,b) \ 138 (typecheck(__u64, a) && \ 139 typecheck(__u64, b) && \ 140 ((__s64)((a) - (b)) >= 0)) 141 #define time_before_eq64(a,b) time_after_eq64(b,a) 142 143 #define time_in_range64(a, b, c) \ 144 (time_after_eq64(a, b) && \ 145 time_before_eq64(a, c)) 146 147 /* 148 * These four macros compare jiffies and 'a' for convenience. 149 */ 150 151 /* time_is_before_jiffies(a) return true if a is before jiffies */ 152 #define time_is_before_jiffies(a) time_after(jiffies, a) 153 154 /* time_is_after_jiffies(a) return true if a is after jiffies */ 155 #define time_is_after_jiffies(a) time_before(jiffies, a) 156 157 /* time_is_before_eq_jiffies(a) return true if a is before or equal to jiffies*/ 158 #define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a) 159 160 /* time_is_after_eq_jiffies(a) return true if a is after or equal to jiffies*/ 161 #define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a) 162 163 /* 164 * Have the 32 bit jiffies value wrap 5 minutes after boot 165 * so jiffies wrap bugs show up earlier. 166 */ 167 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) 168 169 /* 170 * Change timeval to jiffies, trying to avoid the 171 * most obvious overflows.. 172 * 173 * And some not so obvious. 174 * 175 * Note that we don't want to return LONG_MAX, because 176 * for various timeout reasons we often end up having 177 * to wait "jiffies+1" in order to guarantee that we wait 178 * at _least_ "jiffies" - so "jiffies+1" had better still 179 * be positive. 180 */ 181 #define MAX_JIFFY_OFFSET ((LONG_MAX >> 1)-1) 182 183 extern unsigned long preset_lpj; 184 185 /* 186 * We want to do realistic conversions of time so we need to use the same 187 * values the update wall clock code uses as the jiffies size. This value 188 * is: TICK_NSEC (which is defined in timex.h). This 189 * is a constant and is in nanoseconds. We will use scaled math 190 * with a set of scales defined here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and 191 * NSEC_JIFFIE_SC. Note that these defines contain nothing but 192 * constants and so are computed at compile time. SHIFT_HZ (computed in 193 * timex.h) adjusts the scaling for different HZ values. 194 195 * Scaled math??? What is that? 196 * 197 * Scaled math is a way to do integer math on values that would, 198 * otherwise, either overflow, underflow, or cause undesired div 199 * instructions to appear in the execution path. 
In short, we "scale" 200 * up the operands so they take more bits (more precision, less 201 * underflow), do the desired operation and then "scale" the result back 202 * by the same amount. If we do the scaling by shifting we avoid the 203 * costly mpy and the dastardly div instructions. 204 205 * Suppose, for example, we want to convert from seconds to jiffies 206 * where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE. The 207 * simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE; We 208 * observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we 209 * might calculate at compile time, however, the result will only have 210 * about 3-4 bits of precision (less for smaller values of HZ). 211 * 212 * So, we scale as follows: 213 * jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE); 214 * jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE; 215 * Then we make SCALE a power of two so: 216 * jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE; 217 * Now we define: 218 * #define SEC_CONV = ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) 219 * jiff = (sec * SEC_CONV) >> SCALE; 220 * 221 * Often the math we use will expand beyond 32-bits so we tell C how to 222 * do this and pass the 64-bit result of the mpy through the ">> SCALE" 223 * which should take the result back to 32-bits. We want this expansion 224 * to capture as much precision as possible. At the same time we don't 225 * want to overflow so we pick the SCALE to avoid this. In this file, 226 * that means using a different scale for each range of HZ values (as 227 * defined in timex.h). 228 * 229 * For those who want to know, gcc will give a 64-bit result from a "*" 230 * operator if the result is a long long AND at least one of the 231 * operands is cast to long long (usually just prior to the "*" so as 232 * not to confuse it into thinking it really has a 64-bit operand, 233 * which, buy the way, it can do, but it takes more code and at least 2 234 * mpys). 235 236 * We also need to be aware that one second in nanoseconds is only a 237 * couple of bits away from overflowing a 32-bit word, so we MUST use 238 * 64-bits to get the full range time in nanoseconds. 239 240 */ 241 242 /* 243 * Here are the scales we will use. One for seconds, nanoseconds and 244 * microseconds. 245 * 246 * Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and 247 * check if the sign bit is set. If not, we bump the shift count by 1. 248 * (Gets an extra bit of precision where we can use it.) 249 * We know it is set for HZ = 1024 and HZ = 100 not for 1000. 250 * Haven't tested others. 251 252 * Limits of cpp (for #if expressions) only long (no long long), but 253 * then we only need the most signicant bit. 254 */ 255 256 #define SEC_JIFFIE_SC (31 - SHIFT_HZ) 257 #if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000) 258 #undef SEC_JIFFIE_SC 259 #define SEC_JIFFIE_SC (32 - SHIFT_HZ) 260 #endif 261 #define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29) 262 #define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\ 263 TICK_NSEC -1) / (u64)TICK_NSEC)) 264 265 #define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\ 266 TICK_NSEC -1) / (u64)TICK_NSEC)) 267 /* 268 * The maximum jiffie value is (MAX_INT >> 1). Here we translate that 269 * into seconds. The 64-bit case will overflow if we are not careful, 270 * so use the messy SH_DIV macro to do it. Still all constants. 
271 */ 272 #if BITS_PER_LONG < 64 273 # define MAX_SEC_IN_JIFFIES \ 274 (long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC) 275 #else /* take care of overflow on 64 bits machines */ 276 # define MAX_SEC_IN_JIFFIES \ 277 (SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1) 278 279 #endif 280 281 /* 282 * Convert various time units to each other: 283 */ 284 extern unsigned int jiffies_to_msecs(const unsigned long j); 285 extern unsigned int jiffies_to_usecs(const unsigned long j); 286 287 static inline u64 jiffies_to_nsecs(const unsigned long j) 288 { 289 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC; 290 } 291 292 extern unsigned long __msecs_to_jiffies(const unsigned int m); 293 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) 294 /* 295 * HZ is equal to or smaller than 1000, and 1000 is a nice round 296 * multiple of HZ, divide with the factor between them, but round 297 * upwards: 298 */ 299 static inline unsigned long _msecs_to_jiffies(const unsigned int m) 300 { 301 return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); 302 } 303 #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) 304 /* 305 * HZ is larger than 1000, and HZ is a nice round multiple of 1000 - 306 * simply multiply with the factor between them. 307 * 308 * But first make sure the multiplication result cannot overflow: 309 */ 310 static inline unsigned long _msecs_to_jiffies(const unsigned int m) 311 { 312 if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) 313 return MAX_JIFFY_OFFSET; 314 return m * (HZ / MSEC_PER_SEC); 315 } 316 #else 317 /* 318 * Generic case - multiply, round and divide. But first check that if 319 * we are doing a net multiplication, that we wouldn't overflow: 320 */ 321 static inline unsigned long _msecs_to_jiffies(const unsigned int m) 322 { 323 if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) 324 return MAX_JIFFY_OFFSET; 325 326 return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32; 327 } 328 #endif 329 /** 330 * msecs_to_jiffies: - convert milliseconds to jiffies 331 * @m: time in milliseconds 332 * 333 * conversion is done as follows: 334 * 335 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET) 336 * 337 * - 'too large' values [that would result in larger than 338 * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. 339 * 340 * - all other values are converted to jiffies by either multiplying 341 * the input value by a factor or dividing it with a factor and 342 * handling any 32-bit overflows. 343 * for the details see __msecs_to_jiffies() 344 * 345 * msecs_to_jiffies() checks for the passed in value being a constant 346 * via __builtin_constant_p() allowing gcc to eliminate most of the 347 * code, __msecs_to_jiffies() is called if the value passed does not 348 * allow constant folding and the actual conversion must be done at 349 * runtime. 350 * the HZ range specific helpers _msecs_to_jiffies() are called both 351 * directly here and from __msecs_to_jiffies() in the case where 352 * constant folding is not possible. 
353 */ 354 static __always_inline unsigned long msecs_to_jiffies(const unsigned int m) 355 { 356 if (__builtin_constant_p(m)) { 357 if ((int)m < 0) 358 return MAX_JIFFY_OFFSET; 359 return _msecs_to_jiffies(m); 360 } else { 361 return __msecs_to_jiffies(m); 362 } 363 } 364 365 extern unsigned long __usecs_to_jiffies(const unsigned int u); 366 #if !(USEC_PER_SEC % HZ) 367 static inline unsigned long _usecs_to_jiffies(const unsigned int u) 368 { 369 return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); 370 } 371 #else 372 static inline unsigned long _usecs_to_jiffies(const unsigned int u) 373 { 374 return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32) 375 >> USEC_TO_HZ_SHR32; 376 } 377 #endif 378 379 /** 380 * usecs_to_jiffies: - convert microseconds to jiffies 381 * @u: time in microseconds 382 * 383 * conversion is done as follows: 384 * 385 * - 'too large' values [that would result in larger than 386 * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. 387 * 388 * - all other values are converted to jiffies by either multiplying 389 * the input value by a factor or dividing it with a factor and 390 * handling any 32-bit overflows as for msecs_to_jiffies. 391 * 392 * usecs_to_jiffies() checks for the passed in value being a constant 393 * via __builtin_constant_p() allowing gcc to eliminate most of the 394 * code, __usecs_to_jiffies() is called if the value passed does not 395 * allow constant folding and the actual conversion must be done at 396 * runtime. 397 * the HZ range specific helpers _usecs_to_jiffies() are called both 398 * directly here and from __msecs_to_jiffies() in the case where 399 * constant folding is not possible. 400 */ 401 static __always_inline unsigned long usecs_to_jiffies(const unsigned int u) 402 { 403 if (__builtin_constant_p(u)) { 404 if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) 405 return MAX_JIFFY_OFFSET; 406 return _usecs_to_jiffies(u); 407 } else { 408 return __usecs_to_jiffies(u); 409 } 410 } 411 412 extern unsigned long timespec64_to_jiffies(const struct timespec64 *value); 413 extern void jiffies_to_timespec64(const unsigned long jiffies, 414 struct timespec64 *value); 415 static inline unsigned long timespec_to_jiffies(const struct timespec *value) 416 { 417 struct timespec64 ts = timespec_to_timespec64(*value); 418 419 return timespec64_to_jiffies(&ts); 420 } 421 422 static inline void jiffies_to_timespec(const unsigned long jiffies, 423 struct timespec *value) 424 { 425 struct timespec64 ts; 426 427 jiffies_to_timespec64(jiffies, &ts); 428 *value = timespec64_to_timespec(ts); 429 } 430 431 extern unsigned long timeval_to_jiffies(const struct timeval *value); 432 extern void jiffies_to_timeval(const unsigned long jiffies, 433 struct timeval *value); 434 435 extern clock_t jiffies_to_clock_t(unsigned long x); 436 static inline clock_t jiffies_delta_to_clock_t(long delta) 437 { 438 return jiffies_to_clock_t(max(0L, delta)); 439 } 440 441 extern unsigned long clock_t_to_jiffies(unsigned long x); 442 extern u64 jiffies_64_to_clock_t(u64 x); 443 extern u64 nsec_to_clock_t(u64 x); 444 extern u64 nsecs_to_jiffies64(u64 n); 445 extern unsigned long nsecs_to_jiffies(u64 n); 446 447 #define TIMESTAMP_SIZE 30 448 449 #endif
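The wrap-safe comparison macros and the msecs_to_jiffies() helpers above are normally combined for polling timeouts: compute the deadline once in jiffies, then compare with time_after() rather than a plain "<" so the 32-bit wrap handled by these macros stays harmless. A minimal sketch follows; my_hw_ready() is a hypothetical status check.

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

extern bool my_hw_ready(void);             /* hypothetical hardware status check */

/*
 * Hedged sketch: poll a ready bit for at most 100 ms using the
 * wrap-safe helpers from this header.
 */
static int my_wait_ready(void)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(100);

        while (!my_hw_ready()) {
                if (time_after(jiffies, deadline))
                        return -ETIMEDOUT; /* deadline passed, even across a jiffies wrap */
                usleep_range(100, 200);
        }
        return 0;
}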
1 #ifndef _LINUX_MATH64_H 2 #define _LINUX_MATH64_H 3 4 #include <linux/types.h> 5 #include <asm/div64.h> 6 7 #if BITS_PER_LONG == 64 8 9 #define div64_long(x, y) div64_s64((x), (y)) 10 #define div64_ul(x, y) div64_u64((x), (y)) 11 12 /** 13 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder 14 * 15 * This is commonly provided by 32bit archs to provide an optimized 64bit 16 * divide. 17 */ 18 static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) 19 { 20 *remainder = dividend % divisor; 21 return dividend / divisor; 22 } 23 24 /** 25 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder 26 */ 27 static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) 28 { 29 *remainder = dividend % divisor; 30 return dividend / divisor; 31 } 32 33 /** 34 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder 35 */ 36 static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) 37 { 38 *remainder = dividend % divisor; 39 return dividend / divisor; 40 } 41 42 /** 43 * div64_u64 - unsigned 64bit divide with 64bit divisor 44 */ 45 static inline u64 div64_u64(u64 dividend, u64 divisor) 46 { 47 return dividend / divisor; 48 } 49 50 /** 51 * div64_s64 - signed 64bit divide with 64bit divisor 52 */ 53 static inline s64 div64_s64(s64 dividend, s64 divisor) 54 { 55 return dividend / divisor; 56 } 57 58 #elif BITS_PER_LONG == 32 59 60 #define div64_long(x, y) div_s64((x), (y)) 61 #define div64_ul(x, y) div_u64((x), (y)) 62 63 #ifndef div_u64_rem 64 static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) 65 { 66 *remainder = do_div(dividend, divisor); 67 return dividend; 68 } 69 #endif 70 71 #ifndef div_s64_rem 72 extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder); 73 #endif 74 75 #ifndef div64_u64_rem 76 extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder); 77 #endif 78 79 #ifndef div64_u64 80 extern u64 div64_u64(u64 dividend, u64 divisor); 81 #endif 82 83 #ifndef div64_s64 84 extern s64 div64_s64(s64 dividend, s64 divisor); 85 #endif 86 87 #endif /* BITS_PER_LONG */ 88 89 /** 90 * div_u64 - unsigned 64bit divide with 32bit divisor 91 * 92 * This is the most common 64bit divide and should be used if possible, 93 * as many 32bit archs can optimize this variant better than a full 64bit 94 * divide. 95 */ 96 #ifndef div_u64 97 static inline u64 div_u64(u64 dividend, u32 divisor) 98 { 99 u32 remainder; 100 return div_u64_rem(dividend, divisor, &remainder); 101 } 102 #endif 103 104 /** 105 * div_s64 - signed 64bit divide with 32bit divisor 106 */ 107 #ifndef div_s64 108 static inline s64 div_s64(s64 dividend, s32 divisor) 109 { 110 s32 remainder; 111 return div_s64_rem(dividend, divisor, &remainder); 112 } 113 #endif 114 115 u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder); 116 117 static __always_inline u32 118 __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) 119 { 120 u32 ret = 0; 121 122 while (dividend >= divisor) { 123 /* The following asm() prevents the compiler from 124 optimising this loop into a modulo operation. 
*/ 125 asm("" : "+rm"(dividend)); 126 127 dividend -= divisor; 128 ret++; 129 } 130 131 *remainder = dividend; 132 133 return ret; 134 } 135 136 #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) 137 138 #ifndef mul_u64_u32_shr 139 static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) 140 { 141 return (u64)(((unsigned int)a * mul) >> shift); 142 } 143 #endif /* mul_u64_u32_shr */ 144 145 #ifndef mul_u64_u64_shr 146 static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift) 147 { 148 return (u64)(((unsigned int)a * mul) >> shift); 149 } 150 #endif /* mul_u64_u64_shr */ 151 152 #else 153 154 #ifndef mul_u64_u32_shr 155 static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) 156 { 157 u32 ah, al; 158 u64 ret; 159 160 al = a; 161 ah = a >> 32; 162 163 ret = ((u64)al * mul) >> shift; 164 if (ah) 165 ret += ((u64)ah * mul) << (32 - shift); 166 167 return ret; 168 } 169 #endif /* mul_u64_u32_shr */ 170 171 #ifndef mul_u64_u64_shr 172 static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift) 173 { 174 union { 175 u64 ll; 176 struct { 177 #ifdef __BIG_ENDIAN 178 u32 high, low; 179 #else 180 u32 low, high; 181 #endif 182 } l; 183 } rl, rm, rn, rh, a0, b0; 184 u64 c; 185 186 a0.ll = a; 187 b0.ll = b; 188 189 rl.ll = (u64)a0.l.low * b0.l.low; 190 rm.ll = (u64)a0.l.low * b0.l.high; 191 rn.ll = (u64)a0.l.high * b0.l.low; 192 rh.ll = (u64)a0.l.high * b0.l.high; 193 194 /* 195 * Each of these lines computes a 64-bit intermediate result into "c", 196 * starting at bits 32-95. The low 32-bits go into the result of the 197 * multiplication, the high 32-bits are carried into the next step. 198 */ 199 rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low; 200 rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low; 201 rh.l.high = (c >> 32) + rh.l.high; 202 203 /* 204 * The 128-bit result of the multiplication is in rl.ll and rh.ll, 205 * shift it right and throw away the high part of the result. 206 */ 207 if (shift == 0) 208 return rl.ll; 209 if (shift < 64) 210 return (rl.ll >> shift) | (rh.ll << (64 - shift)); 211 return rh.ll >> (shift & 63); 212 } 213 #endif /* mul_u64_u64_shr */ 214 215 #endif 216 217 #ifndef mul_u64_u32_div 218 static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) 219 { 220 union { 221 u64 ll; 222 struct { 223 #ifdef __BIG_ENDIAN 224 u32 high, low; 225 #else 226 u32 low, high; 227 #endif 228 } l; 229 } u, rl, rh; 230 231 u.ll = a; 232 rl.ll = (u64)u.l.low * mul; 233 rh.ll = (u64)u.l.high * mul + rl.l.high; 234 235 /* Bits 32-63 of the result will be in rh.l.low. */ 236 rl.l.high = do_div(rh.ll, divisor); 237 238 /* Bits 0-31 of the result will be in rl.l.low. */ 239 do_div(rl.ll, divisor); 240 241 rl.l.high = rh.l.low; 242 return rl.ll; 243 } 244 #endif /* mul_u64_u32_div */ 245 246 #endif /* _LINUX_MATH64_H */
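As the comment on div_u64() notes, 64-bit-by-32-bit divides should go through these helpers so 32-bit architectures can use their optimized do_div() path instead of a full 64/64 library divide. A small sketch under that assumption, splitting a nanosecond count into whole milliseconds plus a remainder (NSEC_PER_MSEC is taken from the time headers):

#include <linux/math64.h>
#include <linux/time64.h>                  /* NSEC_PER_MSEC */

/*
 * Hedged sketch: nanosecond-to-millisecond conversion via the
 * 64/32 division helpers declared above.
 */
static u64 my_ns_to_ms(u64 ns)
{
        return div_u64(ns, NSEC_PER_MSEC);              /* quotient only */
}

static u64 my_ns_to_ms_rem(u64 ns, u32 *rem_ns)
{
        return div_u64_rem(ns, NSEC_PER_MSEC, rem_ns);  /* quotient plus leftover ns */
}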
1 #ifndef _LINUX_OF_H 2 #define _LINUX_OF_H 3 /* 4 * Definitions for talking to the Open Firmware PROM on 5 * Power Macintosh and other computers. 6 * 7 * Copyright (C) 1996-2005 Paul Mackerras. 8 * 9 * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp. 10 * Updates for SPARC64 by David S. Miller 11 * Derived from PowerPC and Sparc prom.h files by Stephen Rothwell, IBM Corp. 12 * 13 * This program is free software; you can redistribute it and/or 14 * modify it under the terms of the GNU General Public License 15 * as published by the Free Software Foundation; either version 16 * 2 of the License, or (at your option) any later version. 17 */ 18 #include <linux/types.h> 19 #include <linux/bitops.h> 20 #include <linux/errno.h> 21 #include <linux/kobject.h> 22 #include <linux/mod_devicetable.h> 23 #include <linux/spinlock.h> 24 #include <linux/topology.h> 25 #include <linux/notifier.h> 26 #include <linux/property.h> 27 #include <linux/list.h> 28 29 #include <asm/byteorder.h> 30 #include <asm/errno.h> 31 32 typedef u32 phandle; 33 typedef u32 ihandle; 34 35 struct property { 36 char *name; 37 int length; 38 void *value; 39 struct property *next; 40 unsigned long _flags; 41 unsigned int unique_id; 42 struct bin_attribute attr; 43 }; 44 45 #if defined(CONFIG_SPARC) 46 struct of_irq_controller; 47 #endif 48 49 struct device_node { 50 const char *name; 51 const char *type; 52 phandle phandle; 53 const char *full_name; 54 struct fwnode_handle fwnode; 55 56 struct property *properties; 57 struct property *deadprops; /* removed properties */ 58 struct device_node *parent; 59 struct device_node *child; 60 struct device_node *sibling; 61 struct kobject kobj; 62 unsigned long _flags; 63 void *data; 64 #if defined(CONFIG_SPARC) 65 const char *path_component_name; 66 unsigned int unique_id; 67 struct of_irq_controller *irq_trans; 68 #endif 69 }; 70 71 #define MAX_PHANDLE_ARGS 16 72 struct of_phandle_args { 73 struct device_node *np; 74 int args_count; 75 uint32_t args[MAX_PHANDLE_ARGS]; 76 }; 77 78 struct of_phandle_iterator { 79 /* Common iterator information */ 80 const char *cells_name; 81 int cell_count; 82 const struct device_node *parent; 83 84 /* List size information */ 85 const __be32 *list_end; 86 const __be32 *phandle_end; 87 88 /* Current position state */ 89 const __be32 *cur; 90 uint32_t cur_count; 91 phandle phandle; 92 struct device_node *node; 93 }; 94 95 struct of_reconfig_data { 96 struct device_node *dn; 97 struct property *prop; 98 struct property *old_prop; 99 }; 100 101 /* initialize a node */ 102 extern struct kobj_type of_node_ktype; 103 static inline void of_node_init(struct device_node *node) 104 { 105 kobject_init(&node->kobj, &of_node_ktype); 106 node->fwnode.type = FWNODE_OF; 107 } 108 109 /* true when node is initialized */ 110 static inline int of_node_is_initialized(struct device_node *node) 111 { 112 return node && node->kobj.state_initialized; 113 } 114 115 /* true when node is attached (i.e. 
present on sysfs) */ 116 static inline int of_node_is_attached(struct device_node *node) 117 { 118 return node && node->kobj.state_in_sysfs; 119 } 120 121 #ifdef CONFIG_OF_DYNAMIC 122 extern struct device_node *of_node_get(struct device_node *node); 123 extern void of_node_put(struct device_node *node); 124 #else /* CONFIG_OF_DYNAMIC */ 125 /* Dummy ref counting routines - to be implemented later */ 126 static inline struct device_node *of_node_get(struct device_node *node) 127 { 128 return node; 129 } 130 static inline void of_node_put(struct device_node *node) { } 131 #endif /* !CONFIG_OF_DYNAMIC */ 132 133 /* Pointer for first entry in chain of all nodes. */ 134 extern struct device_node *of_root; 135 extern struct device_node *of_chosen; 136 extern struct device_node *of_aliases; 137 extern struct device_node *of_stdout; 138 extern raw_spinlock_t devtree_lock; 139 140 /* flag descriptions (need to be visible even when !CONFIG_OF) */ 141 #define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */ 142 #define OF_DETACHED 2 /* node has been detached from the device tree */ 143 #define OF_POPULATED 3 /* device already created for the node */ 144 #define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */ 145 146 #define OF_BAD_ADDR ((u64)-1) 147 148 #ifdef CONFIG_OF 149 void of_core_init(void); 150 151 static inline bool is_of_node(struct fwnode_handle *fwnode) 152 { 153 return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF; 154 } 155 156 static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) 157 { 158 return is_of_node(fwnode) ? 159 container_of(fwnode, struct device_node, fwnode) : NULL; 160 } 161 162 static inline bool of_have_populated_dt(void) 163 { 164 return of_root != NULL; 165 } 166 167 static inline bool of_node_is_root(const struct device_node *node) 168 { 169 return node && (node->parent == NULL); 170 } 171 172 static inline int of_node_check_flag(struct device_node *n, unsigned long flag) 173 { 174 return test_bit(flag, &n->_flags); 175 } 176 177 static inline int of_node_test_and_set_flag(struct device_node *n, 178 unsigned long flag) 179 { 180 return test_and_set_bit(flag, &n->_flags); 181 } 182 183 static inline void of_node_set_flag(struct device_node *n, unsigned long flag) 184 { 185 set_bit(flag, &n->_flags); 186 } 187 188 static inline void of_node_clear_flag(struct device_node *n, unsigned long flag) 189 { 190 clear_bit(flag, &n->_flags); 191 } 192 193 static inline int of_property_check_flag(struct property *p, unsigned long flag) 194 { 195 return test_bit(flag, &p->_flags); 196 } 197 198 static inline void of_property_set_flag(struct property *p, unsigned long flag) 199 { 200 set_bit(flag, &p->_flags); 201 } 202 203 static inline void of_property_clear_flag(struct property *p, unsigned long flag) 204 { 205 clear_bit(flag, &p->_flags); 206 } 207 208 extern struct device_node *__of_find_all_nodes(struct device_node *prev); 209 extern struct device_node *of_find_all_nodes(struct device_node *prev); 210 211 /* 212 * OF address retrieval & translation 213 */ 214 215 /* Helper to read a big number; size is in cells (not bytes) */ 216 static inline u64 of_read_number(const __be32 *cell, int size) 217 { 218 u64 r = 0; 219 while (size--) 220 r = (r << 32) | be32_to_cpu(*(cell++)); 221 return r; 222 } 223 224 /* Like of_read_number, but we want an unsigned long result */ 225 static inline unsigned long of_read_ulong(const __be32 *cell, int size) 226 { 227 /* toss away upper bits if unsigned long is smaller 
than u64 */ 228 return of_read_number(cell, size); 229 } 230 231 #if defined(CONFIG_SPARC) 232 #include <asm/prom.h> 233 #endif 234 235 /* Default #address and #size cells. Allow arch asm/prom.h to override */ 236 #if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT) 237 #define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1 238 #define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 239 #endif 240 241 #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) 242 #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) 243 244 static inline const char *of_node_full_name(const struct device_node *np) 245 { 246 return np ? np->full_name : "<no-node>"; 247 } 248 249 #define for_each_of_allnodes_from(from, dn) \ 250 for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn)) 251 #define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn) 252 extern struct device_node *of_find_node_by_name(struct device_node *from, 253 const char *name); 254 extern struct device_node *of_find_node_by_type(struct device_node *from, 255 const char *type); 256 extern struct device_node *of_find_compatible_node(struct device_node *from, 257 const char *type, const char *compat); 258 extern struct device_node *of_find_matching_node_and_match( 259 struct device_node *from, 260 const struct of_device_id *matches, 261 const struct of_device_id **match); 262 263 extern struct device_node *of_find_node_opts_by_path(const char *path, 264 const char **opts); 265 static inline struct device_node *of_find_node_by_path(const char *path) 266 { 267 return of_find_node_opts_by_path(path, NULL); 268 } 269 270 extern struct device_node *of_find_node_by_phandle(phandle handle); 271 extern struct device_node *of_get_parent(const struct device_node *node); 272 extern struct device_node *of_get_next_parent(struct device_node *node); 273 extern struct device_node *of_get_next_child(const struct device_node *node, 274 struct device_node *prev); 275 extern struct device_node *of_get_next_available_child( 276 const struct device_node *node, struct device_node *prev); 277 278 extern struct device_node *of_get_child_by_name(const struct device_node *node, 279 const char *name); 280 281 /* cache lookup */ 282 extern struct device_node *of_find_next_cache_node(const struct device_node *); 283 extern struct device_node *of_find_node_with_property( 284 struct device_node *from, const char *prop_name); 285 286 extern struct property *of_find_property(const struct device_node *np, 287 const char *name, 288 int *lenp); 289 extern int of_property_count_elems_of_size(const struct device_node *np, 290 const char *propname, int elem_size); 291 extern int of_property_read_u32_index(const struct device_node *np, 292 const char *propname, 293 u32 index, u32 *out_value); 294 extern int of_property_read_u8_array(const struct device_node *np, 295 const char *propname, u8 *out_values, size_t sz); 296 extern int of_property_read_u16_array(const struct device_node *np, 297 const char *propname, u16 *out_values, size_t sz); 298 extern int of_property_read_u32_array(const struct device_node *np, 299 const char *propname, 300 u32 *out_values, 301 size_t sz); 302 extern int of_property_read_u64(const struct device_node *np, 303 const char *propname, u64 *out_value); 304 extern int of_property_read_u64_array(const struct device_node *np, 305 const char *propname, 306 u64 *out_values, 307 size_t sz); 308 309 extern int of_property_read_string(const struct device_node *np, 310 const char *propname, 311 const char **out_string); 312 extern int of_property_match_string(const struct 
device_node *np, 313 const char *propname, 314 const char *string); 315 extern int of_property_read_string_helper(const struct device_node *np, 316 const char *propname, 317 const char **out_strs, size_t sz, int index); 318 extern int of_device_is_compatible(const struct device_node *device, 319 const char *); 320 extern int of_device_compatible_match(struct device_node *device, 321 const char *const *compat); 322 extern bool of_device_is_available(const struct device_node *device); 323 extern bool of_device_is_big_endian(const struct device_node *device); 324 extern const void *of_get_property(const struct device_node *node, 325 const char *name, 326 int *lenp); 327 extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); 328 #define for_each_property_of_node(dn, pp) \ 329 for (pp = dn->properties; pp != NULL; pp = pp->next) 330 331 extern int of_n_addr_cells(struct device_node *np); 332 extern int of_n_size_cells(struct device_node *np); 333 extern const struct of_device_id *of_match_node( 334 const struct of_device_id *matches, const struct device_node *node); 335 extern int of_modalias_node(struct device_node *node, char *modalias, int len); 336 extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args); 337 extern struct device_node *of_parse_phandle(const struct device_node *np, 338 const char *phandle_name, 339 int index); 340 extern int of_parse_phandle_with_args(const struct device_node *np, 341 const char *list_name, const char *cells_name, int index, 342 struct of_phandle_args *out_args); 343 extern int of_parse_phandle_with_fixed_args(const struct device_node *np, 344 const char *list_name, int cells_count, int index, 345 struct of_phandle_args *out_args); 346 extern int of_count_phandle_with_args(const struct device_node *np, 347 const char *list_name, const char *cells_name); 348 349 /* phandle iterator functions */ 350 extern int of_phandle_iterator_init(struct of_phandle_iterator *it, 351 const struct device_node *np, 352 const char *list_name, 353 const char *cells_name, 354 int cell_count); 355 356 extern int of_phandle_iterator_next(struct of_phandle_iterator *it); 357 extern int of_phandle_iterator_args(struct of_phandle_iterator *it, 358 uint32_t *args, 359 int size); 360 361 extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); 362 extern int of_alias_get_id(struct device_node *np, const char *stem); 363 extern int of_alias_get_highest_id(const char *stem); 364 365 extern int of_machine_is_compatible(const char *compat); 366 367 extern int of_add_property(struct device_node *np, struct property *prop); 368 extern int of_remove_property(struct device_node *np, struct property *prop); 369 extern int of_update_property(struct device_node *np, struct property *newprop); 370 371 /* For updating the device tree at runtime */ 372 #define OF_RECONFIG_ATTACH_NODE 0x0001 373 #define OF_RECONFIG_DETACH_NODE 0x0002 374 #define OF_RECONFIG_ADD_PROPERTY 0x0003 375 #define OF_RECONFIG_REMOVE_PROPERTY 0x0004 376 #define OF_RECONFIG_UPDATE_PROPERTY 0x0005 377 378 extern int of_attach_node(struct device_node *); 379 extern int of_detach_node(struct device_node *); 380 381 #define of_match_ptr(_ptr) (_ptr) 382 383 /* 384 * struct property *prop; 385 * const __be32 *p; 386 * u32 u; 387 * 388 * of_property_for_each_u32(np, "propname", prop, p, u) 389 * printk("U32 value: %x\n", u); 390 */ 391 const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur, 392 u32 *pu); 393 /* 394 * struct property *prop; 395 * const char 
*s; 396 * 397 * of_property_for_each_string(np, "propname", prop, s) 398 * printk("String value: %s\n", s); 399 */ 400 const char *of_prop_next_string(struct property *prop, const char *cur); 401 402 bool of_console_check(struct device_node *dn, char *name, int index); 403 404 #else /* CONFIG_OF */ 405 406 static inline void of_core_init(void) 407 { 408 } 409 410 static inline bool is_of_node(struct fwnode_handle *fwnode) 411 { 412 return false; 413 } 414 415 static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) 416 { 417 return NULL; 418 } 419 420 static inline const char* of_node_full_name(const struct device_node *np) 421 { 422 return "<no-node>"; 423 } 424 425 static inline struct device_node *of_find_node_by_name(struct device_node *from, 426 const char *name) 427 { 428 return NULL; 429 } 430 431 static inline struct device_node *of_find_node_by_type(struct device_node *from, 432 const char *type) 433 { 434 return NULL; 435 } 436 437 static inline struct device_node *of_find_matching_node_and_match( 438 struct device_node *from, 439 const struct of_device_id *matches, 440 const struct of_device_id **match) 441 { 442 return NULL; 443 } 444 445 static inline struct device_node *of_find_node_by_path(const char *path) 446 { 447 return NULL; 448 } 449 450 static inline struct device_node *of_find_node_opts_by_path(const char *path, 451 const char **opts) 452 { 453 return NULL; 454 } 455 456 static inline struct device_node *of_find_node_by_phandle(phandle handle) 457 { 458 return NULL; 459 } 460 461 static inline struct device_node *of_get_parent(const struct device_node *node) 462 { 463 return NULL; 464 } 465 466 static inline struct device_node *of_get_next_child( 467 const struct device_node *node, struct device_node *prev) 468 { 469 return NULL; 470 } 471 472 static inline struct device_node *of_get_next_available_child( 473 const struct device_node *node, struct device_node *prev) 474 { 475 return NULL; 476 } 477 478 static inline struct device_node *of_find_node_with_property( 479 struct device_node *from, const char *prop_name) 480 { 481 return NULL; 482 } 483 484 static inline bool of_have_populated_dt(void) 485 { 486 return false; 487 } 488 489 static inline struct device_node *of_get_child_by_name( 490 const struct device_node *node, 491 const char *name) 492 { 493 return NULL; 494 } 495 496 static inline int of_device_is_compatible(const struct device_node *device, 497 const char *name) 498 { 499 return 0; 500 } 501 502 static inline bool of_device_is_available(const struct device_node *device) 503 { 504 return false; 505 } 506 507 static inline bool of_device_is_big_endian(const struct device_node *device) 508 { 509 return false; 510 } 511 512 static inline struct property *of_find_property(const struct device_node *np, 513 const char *name, 514 int *lenp) 515 { 516 return NULL; 517 } 518 519 static inline struct device_node *of_find_compatible_node( 520 struct device_node *from, 521 const char *type, 522 const char *compat) 523 { 524 return NULL; 525 } 526 527 static inline int of_property_count_elems_of_size(const struct device_node *np, 528 const char *propname, int elem_size) 529 { 530 return -ENOSYS; 531 } 532 533 static inline int of_property_read_u32_index(const struct device_node *np, 534 const char *propname, u32 index, u32 *out_value) 535 { 536 return -ENOSYS; 537 } 538 539 static inline int of_property_read_u8_array(const struct device_node *np, 540 const char *propname, u8 *out_values, size_t sz) 541 { 542 return -ENOSYS; 543 } 544 545 static 
inline int of_property_read_u16_array(const struct device_node *np, 546 const char *propname, u16 *out_values, size_t sz) 547 { 548 return -ENOSYS; 549 } 550 551 static inline int of_property_read_u32_array(const struct device_node *np, 552 const char *propname, 553 u32 *out_values, size_t sz) 554 { 555 return -ENOSYS; 556 } 557 558 static inline int of_property_read_u64_array(const struct device_node *np, 559 const char *propname, 560 u64 *out_values, size_t sz) 561 { 562 return -ENOSYS; 563 } 564 565 static inline int of_property_read_string(const struct device_node *np, 566 const char *propname, 567 const char **out_string) 568 { 569 return -ENOSYS; 570 } 571 572 static inline int of_property_read_string_helper(const struct device_node *np, 573 const char *propname, 574 const char **out_strs, size_t sz, int index) 575 { 576 return -ENOSYS; 577 } 578 579 static inline const void *of_get_property(const struct device_node *node, 580 const char *name, 581 int *lenp) 582 { 583 return NULL; 584 } 585 586 static inline struct device_node *of_get_cpu_node(int cpu, 587 unsigned int *thread) 588 { 589 return NULL; 590 } 591 592 static inline int of_property_read_u64(const struct device_node *np, 593 const char *propname, u64 *out_value) 594 { 595 return -ENOSYS; 596 } 597 598 static inline int of_property_match_string(const struct device_node *np, 599 const char *propname, 600 const char *string) 601 { 602 return -ENOSYS; 603 } 604 605 static inline struct device_node *of_parse_phandle(const struct device_node *np, 606 const char *phandle_name, 607 int index) 608 { 609 return NULL; 610 } 611 612 static inline int of_parse_phandle_with_args(const struct device_node *np, 613 const char *list_name, 614 const char *cells_name, 615 int index, 616 struct of_phandle_args *out_args) 617 { 618 return -ENOSYS; 619 } 620 621 static inline int of_parse_phandle_with_fixed_args(const struct device_node *np, 622 const char *list_name, int cells_count, int index, 623 struct of_phandle_args *out_args) 624 { 625 return -ENOSYS; 626 } 627 628 static inline int of_count_phandle_with_args(struct device_node *np, 629 const char *list_name, 630 const char *cells_name) 631 { 632 return -ENOSYS; 633 } 634 635 static inline int of_phandle_iterator_init(struct of_phandle_iterator *it, 636 const struct device_node *np, 637 const char *list_name, 638 const char *cells_name, 639 int cell_count) 640 { 641 return -ENOSYS; 642 } 643 644 static inline int of_phandle_iterator_next(struct of_phandle_iterator *it) 645 { 646 return -ENOSYS; 647 } 648 649 static inline int of_phandle_iterator_args(struct of_phandle_iterator *it, 650 uint32_t *args, 651 int size) 652 { 653 return 0; 654 } 655 656 static inline int of_alias_get_id(struct device_node *np, const char *stem) 657 { 658 return -ENOSYS; 659 } 660 661 static inline int of_alias_get_highest_id(const char *stem) 662 { 663 return -ENOSYS; 664 } 665 666 static inline int of_machine_is_compatible(const char *compat) 667 { 668 return 0; 669 } 670 671 static inline bool of_console_check(const struct device_node *dn, const char *name, int index) 672 { 673 return false; 674 } 675 676 static inline const __be32 *of_prop_next_u32(struct property *prop, 677 const __be32 *cur, u32 *pu) 678 { 679 return NULL; 680 } 681 682 static inline const char *of_prop_next_string(struct property *prop, 683 const char *cur) 684 { 685 return NULL; 686 } 687 688 static inline int of_node_check_flag(struct device_node *n, unsigned long flag) 689 { 690 return 0; 691 } 692 693 static inline int 
of_node_test_and_set_flag(struct device_node *n, 694 unsigned long flag) 695 { 696 return 0; 697 } 698 699 static inline void of_node_set_flag(struct device_node *n, unsigned long flag) 700 { 701 } 702 703 static inline void of_node_clear_flag(struct device_node *n, unsigned long flag) 704 { 705 } 706 707 static inline int of_property_check_flag(struct property *p, unsigned long flag) 708 { 709 return 0; 710 } 711 712 static inline void of_property_set_flag(struct property *p, unsigned long flag) 713 { 714 } 715 716 static inline void of_property_clear_flag(struct property *p, unsigned long flag) 717 { 718 } 719 720 #define of_match_ptr(_ptr) NULL 721 #define of_match_node(_matches, _node) NULL 722 #endif /* CONFIG_OF */ 723 724 /* Default string compare functions, Allow arch asm/prom.h to override */ 725 #if !defined(of_compat_cmp) 726 #define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2)) 727 #define of_prop_cmp(s1, s2) strcmp((s1), (s2)) 728 #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) 729 #endif 730 731 #if defined(CONFIG_OF) && defined(CONFIG_NUMA) 732 extern int of_node_to_nid(struct device_node *np); 733 #else 734 static inline int of_node_to_nid(struct device_node *device) 735 { 736 return NUMA_NO_NODE; 737 } 738 #endif 739 740 #ifdef CONFIG_OF_NUMA 741 extern int of_numa_init(void); 742 #else 743 static inline int of_numa_init(void) 744 { 745 return -ENOSYS; 746 } 747 #endif 748 749 static inline struct device_node *of_find_matching_node( 750 struct device_node *from, 751 const struct of_device_id *matches) 752 { 753 return of_find_matching_node_and_match(from, matches, NULL); 754 } 755 756 /** 757 * of_property_count_u8_elems - Count the number of u8 elements in a property 758 * 759 * @np: device node from which the property value is to be read. 760 * @propname: name of the property to be searched. 761 * 762 * Search for a property in a device node and count the number of u8 elements 763 * in it. Returns number of elements on sucess, -EINVAL if the property does 764 * not exist or its length does not match a multiple of u8 and -ENODATA if the 765 * property does not have a value. 766 */ 767 static inline int of_property_count_u8_elems(const struct device_node *np, 768 const char *propname) 769 { 770 return of_property_count_elems_of_size(np, propname, sizeof(u8)); 771 } 772 773 /** 774 * of_property_count_u16_elems - Count the number of u16 elements in a property 775 * 776 * @np: device node from which the property value is to be read. 777 * @propname: name of the property to be searched. 778 * 779 * Search for a property in a device node and count the number of u16 elements 780 * in it. Returns number of elements on sucess, -EINVAL if the property does 781 * not exist or its length does not match a multiple of u16 and -ENODATA if the 782 * property does not have a value. 783 */ 784 static inline int of_property_count_u16_elems(const struct device_node *np, 785 const char *propname) 786 { 787 return of_property_count_elems_of_size(np, propname, sizeof(u16)); 788 } 789 790 /** 791 * of_property_count_u32_elems - Count the number of u32 elements in a property 792 * 793 * @np: device node from which the property value is to be read. 794 * @propname: name of the property to be searched. 795 * 796 * Search for a property in a device node and count the number of u32 elements 797 * in it. Returns number of elements on sucess, -EINVAL if the property does 798 * not exist or its length does not match a multiple of u32 and -ENODATA if the 799 * property does not have a value. 
800 */ 801 static inline int of_property_count_u32_elems(const struct device_node *np, 802 const char *propname) 803 { 804 return of_property_count_elems_of_size(np, propname, sizeof(u32)); 805 } 806 807 /** 808 * of_property_count_u64_elems - Count the number of u64 elements in a property 809 * 810 * @np: device node from which the property value is to be read. 811 * @propname: name of the property to be searched. 812 * 813 * Search for a property in a device node and count the number of u64 elements 814 * in it. Returns number of elements on sucess, -EINVAL if the property does 815 * not exist or its length does not match a multiple of u64 and -ENODATA if the 816 * property does not have a value. 817 */ 818 static inline int of_property_count_u64_elems(const struct device_node *np, 819 const char *propname) 820 { 821 return of_property_count_elems_of_size(np, propname, sizeof(u64)); 822 } 823 824 /** 825 * of_property_read_string_array() - Read an array of strings from a multiple 826 * strings property. 827 * @np: device node from which the property value is to be read. 828 * @propname: name of the property to be searched. 829 * @out_strs: output array of string pointers. 830 * @sz: number of array elements to read. 831 * 832 * Search for a property in a device tree node and retrieve a list of 833 * terminated string values (pointer to data, not a copy) in that property. 834 * 835 * If @out_strs is NULL, the number of strings in the property is returned. 836 */ 837 static inline int of_property_read_string_array(const struct device_node *np, 838 const char *propname, const char **out_strs, 839 size_t sz) 840 { 841 return of_property_read_string_helper(np, propname, out_strs, sz, 0); 842 } 843 844 /** 845 * of_property_count_strings() - Find and return the number of strings from a 846 * multiple strings property. 847 * @np: device node from which the property value is to be read. 848 * @propname: name of the property to be searched. 849 * 850 * Search for a property in a device tree node and retrieve the number of null 851 * terminated string contain in it. Returns the number of strings on 852 * success, -EINVAL if the property does not exist, -ENODATA if property 853 * does not have a value, and -EILSEQ if the string is not null-terminated 854 * within the length of the property data. 855 */ 856 static inline int of_property_count_strings(const struct device_node *np, 857 const char *propname) 858 { 859 return of_property_read_string_helper(np, propname, NULL, 0, 0); 860 } 861 862 /** 863 * of_property_read_string_index() - Find and read a string from a multiple 864 * strings property. 865 * @np: device node from which the property value is to be read. 866 * @propname: name of the property to be searched. 867 * @index: index of the string in the list of strings 868 * @out_string: pointer to null terminated return string, modified only if 869 * return value is 0. 870 * 871 * Search for a property in a device tree node and retrieve a null 872 * terminated string value (pointer to data, not a copy) in the list of strings 873 * contained in that property. 874 * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if 875 * property does not have a value, and -EILSEQ if the string is not 876 * null-terminated within the length of the property data. 877 * 878 * The out_string pointer is modified only if a valid string can be decoded. 
879 */ 880 static inline int of_property_read_string_index(const struct device_node *np, 881 const char *propname, 882 int index, const char **output) 883 { 884 int rc = of_property_read_string_helper(np, propname, output, 1, index); 885 return rc < 0 ? rc : 0; 886 } 887 888 /** 889 * of_property_read_bool - Findfrom a property 890 * @np: device node from which the property value is to be read. 891 * @propname: name of the property to be searched. 892 * 893 * Search for a property in a device node. 894 * Returns true if the property exists false otherwise. 895 */ 896 static inline bool of_property_read_bool(const struct device_node *np, 897 const char *propname) 898 { 899 struct property *prop = of_find_property(np, propname, NULL); 900 901 return prop ? true : false; 902 } 903 904 static inline int of_property_read_u8(const struct device_node *np, 905 const char *propname, 906 u8 *out_value) 907 { 908 return of_property_read_u8_array(np, propname, out_value, 1); 909 } 910 911 static inline int of_property_read_u16(const struct device_node *np, 912 const char *propname, 913 u16 *out_value) 914 { 915 return of_property_read_u16_array(np, propname, out_value, 1); 916 } 917 918 static inline int of_property_read_u32(const struct device_node *np, 919 const char *propname, 920 u32 *out_value) 921 { 922 return of_property_read_u32_array(np, propname, out_value, 1); 923 } 924 925 static inline int of_property_read_s32(const struct device_node *np, 926 const char *propname, 927 s32 *out_value) 928 { 929 return of_property_read_u32(np, propname, (u32*) out_value); 930 } 931 932 #define of_for_each_phandle(it, err, np, ln, cn, cc) \ 933 for (of_phandle_iterator_init((it), (np), (ln), (cn), (cc)), \ 934 err = of_phandle_iterator_next(it); \ 935 err == 0; \ 936 err = of_phandle_iterator_next(it)) 937 938 #define of_property_for_each_u32(np, propname, prop, p, u) \ 939 for (prop = of_find_property(np, propname, NULL), \ 940 p = of_prop_next_u32(prop, NULL, &u); \ 941 p; \ 942 p = of_prop_next_u32(prop, p, &u)) 943 944 #define of_property_for_each_string(np, propname, prop, s) \ 945 for (prop = of_find_property(np, propname, NULL), \ 946 s = of_prop_next_string(prop, NULL); \ 947 s; \ 948 s = of_prop_next_string(prop, s)) 949 950 #define for_each_node_by_name(dn, name) \ 951 for (dn = of_find_node_by_name(NULL, name); dn; \ 952 dn = of_find_node_by_name(dn, name)) 953 #define for_each_node_by_type(dn, type) \ 954 for (dn = of_find_node_by_type(NULL, type); dn; \ 955 dn = of_find_node_by_type(dn, type)) 956 #define for_each_compatible_node(dn, type, compatible) \ 957 for (dn = of_find_compatible_node(NULL, type, compatible); dn; \ 958 dn = of_find_compatible_node(dn, type, compatible)) 959 #define for_each_matching_node(dn, matches) \ 960 for (dn = of_find_matching_node(NULL, matches); dn; \ 961 dn = of_find_matching_node(dn, matches)) 962 #define for_each_matching_node_and_match(dn, matches, match) \ 963 for (dn = of_find_matching_node_and_match(NULL, matches, match); \ 964 dn; dn = of_find_matching_node_and_match(dn, matches, match)) 965 966 #define for_each_child_of_node(parent, child) \ 967 for (child = of_get_next_child(parent, NULL); child != NULL; \ 968 child = of_get_next_child(parent, child)) 969 #define for_each_available_child_of_node(parent, child) \ 970 for (child = of_get_next_available_child(parent, NULL); child != NULL; \ 971 child = of_get_next_available_child(parent, child)) 972 973 #define for_each_node_with_property(dn, prop_name) \ 974 for (dn = of_find_node_with_property(NULL, 
prop_name); dn; \ 975 dn = of_find_node_with_property(dn, prop_name)) 976 977 static inline int of_get_child_count(const struct device_node *np) 978 { 979 struct device_node *child; 980 int num = 0; 981 982 for_each_child_of_node(np, child) 983 num++; 984 985 return num; 986 } 987 988 static inline int of_get_available_child_count(const struct device_node *np) 989 { 990 struct device_node *child; 991 int num = 0; 992 993 for_each_available_child_of_node(np, child) 994 num++; 995 996 return num; 997 } 998 999 #if defined(CONFIG_OF) && !defined(MODULE) 1000 #define _OF_DECLARE(table, name, compat, fn, fn_type) \ 1001 static const struct of_device_id __of_table_##name \ 1002 __used __section(__##table##_of_table) \ 1003 = { .compatible = compat, \ 1004 .data = (fn == (fn_type)NULL) ? fn : fn } 1005 #else 1006 #define _OF_DECLARE(table, name, compat, fn, fn_type) \ 1007 static const struct of_device_id __of_table_##name \ 1008 __attribute__((unused)) \ 1009 = { .compatible = compat, \ 1010 .data = (fn == (fn_type)NULL) ? fn : fn } 1011 #endif 1012 1013 typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); 1014 typedef int (*of_init_fn_1_ret)(struct device_node *); 1015 typedef void (*of_init_fn_1)(struct device_node *); 1016 1017 #define OF_DECLARE_1(table, name, compat, fn) \ 1018 _OF_DECLARE(table, name, compat, fn, of_init_fn_1) 1019 #define OF_DECLARE_1_RET(table, name, compat, fn) \ 1020 _OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret) 1021 #define OF_DECLARE_2(table, name, compat, fn) \ 1022 _OF_DECLARE(table, name, compat, fn, of_init_fn_2) 1023 1024 /** 1025 * struct of_changeset_entry - Holds a changeset entry 1026 * 1027 * @node: list_head for the log list 1028 * @action: notifier action 1029 * @np: pointer to the device node affected 1030 * @prop: pointer to the property affected 1031 * @old_prop: hold a pointer to the original property 1032 * 1033 * Every modification of the device tree during a changeset 1034 * is held in a list of of_changeset_entry structures. 1035 * That way we can recover from a partial application, or we can 1036 * revert the changeset 1037 */ 1038 struct of_changeset_entry { 1039 struct list_head node; 1040 unsigned long action; 1041 struct device_node *np; 1042 struct property *prop; 1043 struct property *old_prop; 1044 }; 1045 1046 /** 1047 * struct of_changeset - changeset tracker structure 1048 * 1049 * @entries: list_head for the changeset entries 1050 * 1051 * changesets are a convenient way to apply bulk changes to the 1052 * live tree. In case of an error, changes are rolled-back. 1053 * changesets live on after initial application, and if not 1054 * destroyed after use, they can be reverted in one single call. 
1055 */ 1056 struct of_changeset { 1057 struct list_head entries; 1058 }; 1059 1060 enum of_reconfig_change { 1061 OF_RECONFIG_NO_CHANGE = 0, 1062 OF_RECONFIG_CHANGE_ADD, 1063 OF_RECONFIG_CHANGE_REMOVE, 1064 }; 1065 1066 #ifdef CONFIG_OF_DYNAMIC 1067 extern int of_reconfig_notifier_register(struct notifier_block *); 1068 extern int of_reconfig_notifier_unregister(struct notifier_block *); 1069 extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd); 1070 extern int of_reconfig_get_state_change(unsigned long action, 1071 struct of_reconfig_data *arg); 1072 1073 extern void of_changeset_init(struct of_changeset *ocs); 1074 extern void of_changeset_destroy(struct of_changeset *ocs); 1075 extern int of_changeset_apply(struct of_changeset *ocs); 1076 extern int of_changeset_revert(struct of_changeset *ocs); 1077 extern int of_changeset_action(struct of_changeset *ocs, 1078 unsigned long action, struct device_node *np, 1079 struct property *prop); 1080 1081 static inline int of_changeset_attach_node(struct of_changeset *ocs, 1082 struct device_node *np) 1083 { 1084 return of_changeset_action(ocs, OF_RECONFIG_ATTACH_NODE, np, NULL); 1085 } 1086 1087 static inline int of_changeset_detach_node(struct of_changeset *ocs, 1088 struct device_node *np) 1089 { 1090 return of_changeset_action(ocs, OF_RECONFIG_DETACH_NODE, np, NULL); 1091 } 1092 1093 static inline int of_changeset_add_property(struct of_changeset *ocs, 1094 struct device_node *np, struct property *prop) 1095 { 1096 return of_changeset_action(ocs, OF_RECONFIG_ADD_PROPERTY, np, prop); 1097 } 1098 1099 static inline int of_changeset_remove_property(struct of_changeset *ocs, 1100 struct device_node *np, struct property *prop) 1101 { 1102 return of_changeset_action(ocs, OF_RECONFIG_REMOVE_PROPERTY, np, prop); 1103 } 1104 1105 static inline int of_changeset_update_property(struct of_changeset *ocs, 1106 struct device_node *np, struct property *prop) 1107 { 1108 return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop); 1109 } 1110 #else /* CONFIG_OF_DYNAMIC */ 1111 static inline int of_reconfig_notifier_register(struct notifier_block *nb) 1112 { 1113 return -EINVAL; 1114 } 1115 static inline int of_reconfig_notifier_unregister(struct notifier_block *nb) 1116 { 1117 return -EINVAL; 1118 } 1119 static inline int of_reconfig_notify(unsigned long action, 1120 struct of_reconfig_data *arg) 1121 { 1122 return -EINVAL; 1123 } 1124 static inline int of_reconfig_get_state_change(unsigned long action, 1125 struct of_reconfig_data *arg) 1126 { 1127 return -EINVAL; 1128 } 1129 #endif /* CONFIG_OF_DYNAMIC */ 1130 1131 /* CONFIG_OF_RESOLVE api */ 1132 extern int of_resolve_phandles(struct device_node *tree); 1133 1134 /** 1135 * of_device_is_system_power_controller - Tells if system-power-controller is found for device_node 1136 * @np: Pointer to the given device_node 1137 * 1138 * return true if present false otherwise 1139 */ 1140 static inline bool of_device_is_system_power_controller(const struct device_node *np) 1141 { 1142 return of_property_read_bool(np, "system-power-controller"); 1143 } 1144 1145 /** 1146 * Overlay support 1147 */ 1148 1149 #ifdef CONFIG_OF_OVERLAY 1150 1151 /* ID based overlays; the API for external users */ 1152 int of_overlay_create(struct device_node *tree); 1153 int of_overlay_destroy(int id); 1154 int of_overlay_destroy_all(void); 1155 1156 #else 1157 1158 static inline int of_overlay_create(struct device_node *tree) 1159 { 1160 return -ENOTSUPP; 1161 } 1162 1163 static inline int 
of_overlay_destroy(int id) 1164 { 1165 return -ENOTSUPP; 1166 } 1167 1168 static inline int of_overlay_destroy_all(void) 1169 { 1170 return -ENOTSUPP; 1171 } 1172 1173 #endif 1174 1175 #endif /* _LINUX_OF_H */
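The accessors declared above are typically used from a driver probe path: look up a node, read scalar and string properties, walk its children, and drop the node reference when done. The sketch below shows that flow only; the "vendor,chip" compatible string and the "clock-frequency"/"label" property names are made up for illustration, and errors are simply passed through.

#include <linux/of.h>
#include <linux/errno.h>
#include <linux/printk.h>

/*
 * Hedged sketch of a typical consumer of the OF API declared above.
 * Compatible string and property names are hypothetical.
 */
static int my_parse_dt(void)
{
        struct device_node *np, *child;
        const char *label;
        u32 freq;
        int ret, nchildren = 0;

        np = of_find_compatible_node(NULL, NULL, "vendor,chip");
        if (!np)
                return -ENODEV;

        ret = of_property_read_u32(np, "clock-frequency", &freq);
        if (ret)
                goto out;

        ret = of_property_read_string(np, "label", &label);
        if (ret)
                goto out;

        for_each_child_of_node(np, child)   /* same walk of_get_child_count() performs */
                nchildren++;

        pr_info("%s: %u Hz, %d children\n", label, freq, nchildren);
out:
        of_node_put(np);                    /* drop the reference taken by the lookup */
        return ret;
}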
1 /* 2 * platform_device.h - generic, centralized driver model 3 * 4 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> 5 * 6 * This file is released under the GPLv2 7 * 8 * See Documentation/driver-model/ for more information. 9 */ 10 11 #ifndef _PLATFORM_DEVICE_H_ 12 #define _PLATFORM_DEVICE_H_ 13 14 #include <linux/device.h> 15 #include <linux/mod_devicetable.h> 16 17 #define PLATFORM_DEVID_NONE (-1) 18 #define PLATFORM_DEVID_AUTO (-2) 19 20 struct mfd_cell; 21 struct property_entry; 22 23 struct platform_device { 24 const char *name; 25 int id; 26 bool id_auto; 27 struct device dev; 28 u32 num_resources; 29 struct resource *resource; 30 31 const struct platform_device_id *id_entry; 32 char *driver_override; /* Driver name to force a match */ 33 34 /* MFD cell pointer */ 35 struct mfd_cell *mfd_cell; 36 37 /* arch specific additions */ 38 struct pdev_archdata archdata; 39 }; 40 41 #define platform_get_device_id(pdev) ((pdev)->id_entry) 42 43 #define to_platform_device(x) container_of((x), struct platform_device, dev) 44 45 extern int platform_device_register(struct platform_device *); 46 extern void platform_device_unregister(struct platform_device *); 47 48 extern struct bus_type platform_bus_type; 49 extern struct device platform_bus; 50 51 extern void arch_setup_pdev_archdata(struct platform_device *); 52 extern struct resource *platform_get_resource(struct platform_device *, 53 unsigned int, unsigned int); 54 extern int platform_get_irq(struct platform_device *, unsigned int); 55 extern int platform_irq_count(struct platform_device *); 56 extern struct resource *platform_get_resource_byname(struct platform_device *, 57 unsigned int, 58 const char *); 59 extern int platform_get_irq_byname(struct platform_device *, const char *); 60 extern int platform_add_devices(struct platform_device **, int); 61 62 struct platform_device_info { 63 struct device *parent; 64 struct fwnode_handle *fwnode; 65 66 const char *name; 67 int id; 68 69 const struct resource *res; 70 unsigned int num_res; 71 72 const void *data; 73 size_t size_data; 74 u64 dma_mask; 75 76 struct property_entry *properties; 77 }; 78 extern struct platform_device *platform_device_register_full( 79 const struct platform_device_info *pdevinfo); 80 81 /** 82 * platform_device_register_resndata - add a platform-level device with 83 * resources and platform-specific data 84 * 85 * @parent: parent device for the device we're adding 86 * @name: base name of the device we're adding 87 * @id: instance id 88 * @res: set of resources that needs to be allocated for the device 89 * @num: number of resources 90 * @data: platform specific data for this platform device 91 * @size: size of platform specific data 92 * 93 * Returns &struct platform_device pointer on success, or ERR_PTR() on error. 
94 */ 95 static inline struct platform_device *platform_device_register_resndata( 96 struct device *parent, const char *name, int id, 97 const struct resource *res, unsigned int num, 98 const void *data, size_t size) { 99 100 struct platform_device_info pdevinfo = { 101 .parent = parent, 102 .name = name, 103 .id = id, 104 .res = res, 105 .num_res = num, 106 .data = data, 107 .size_data = size, 108 .dma_mask = 0, 109 }; 110 111 return platform_device_register_full(&pdevinfo); 112 } 113 114 /** 115 * platform_device_register_simple - add a platform-level device and its resources 116 * @name: base name of the device we're adding 117 * @id: instance id 118 * @res: set of resources that needs to be allocated for the device 119 * @num: number of resources 120 * 121 * This function creates a simple platform device that requires minimal 122 * resource and memory management. Canned release function freeing memory 123 * allocated for the device allows drivers using such devices to be 124 * unloaded without waiting for the last reference to the device to be 125 * dropped. 126 * 127 * This interface is primarily intended for use with legacy drivers which 128 * probe hardware directly. Because such drivers create sysfs device nodes 129 * themselves, rather than letting system infrastructure handle such device 130 * enumeration tasks, they don't fully conform to the Linux driver model. 131 * In particular, when such drivers are built as modules, they can't be 132 * "hotplugged". 133 * 134 * Returns &struct platform_device pointer on success, or ERR_PTR() on error. 135 */ 136 static inline struct platform_device *platform_device_register_simple( 137 const char *name, int id, 138 const struct resource *res, unsigned int num) 139 { 140 return platform_device_register_resndata(NULL, name, id, 141 res, num, NULL, 0); 142 } 143 144 /** 145 * platform_device_register_data - add a platform-level device with platform-specific data 146 * @parent: parent device for the device we're adding 147 * @name: base name of the device we're adding 148 * @id: instance id 149 * @data: platform specific data for this platform device 150 * @size: size of platform specific data 151 * 152 * This function creates a simple platform device that requires minimal 153 * resource and memory management. Canned release function freeing memory 154 * allocated for the device allows drivers using such devices to be 155 * unloaded without waiting for the last reference to the device to be 156 * dropped. 157 * 158 * Returns &struct platform_device pointer on success, or ERR_PTR() on error. 
159 */ 160 static inline struct platform_device *platform_device_register_data( 161 struct device *parent, const char *name, int id, 162 const void *data, size_t size) 163 { 164 return platform_device_register_resndata(parent, name, id, 165 NULL, 0, data, size); 166 } 167 168 extern struct platform_device *platform_device_alloc(const char *name, int id); 169 extern int platform_device_add_resources(struct platform_device *pdev, 170 const struct resource *res, 171 unsigned int num); 172 extern int platform_device_add_data(struct platform_device *pdev, 173 const void *data, size_t size); 174 extern int platform_device_add_properties(struct platform_device *pdev, 175 struct property_entry *properties); 176 extern int platform_device_add(struct platform_device *pdev); 177 extern void platform_device_del(struct platform_device *pdev); 178 extern void platform_device_put(struct platform_device *pdev); 179 180 struct platform_driver { 181 int (*probe)(struct platform_device *); 182 int (*remove)(struct platform_device *); 183 void (*shutdown)(struct platform_device *); 184 int (*suspend)(struct platform_device *, pm_message_t state); 185 int (*resume)(struct platform_device *); 186 struct device_driver driver; 187 const struct platform_device_id *id_table; 188 bool prevent_deferred_probe; 189 }; 190 191 #define to_platform_driver(drv) (container_of((drv), struct platform_driver, \ 192 driver)) 193 194 /* 195 * use a macro to avoid include chaining to get THIS_MODULE 196 */ 197 #define platform_driver_register(drv) \ 198 __platform_driver_register(drv, THIS_MODULE) 199 extern int __platform_driver_register(struct platform_driver *, 200 struct module *); 201 extern void platform_driver_unregister(struct platform_driver *); 202 203 /* non-hotpluggable platform devices may use this so that probe() and 204 * its support may live in __init sections, conserving runtime memory. 205 */ 206 #define platform_driver_probe(drv, probe) \ 207 __platform_driver_probe(drv, probe, THIS_MODULE) 208 extern int __platform_driver_probe(struct platform_driver *driver, 209 int (*probe)(struct platform_device *), struct module *module); 210 211 static inline void *platform_get_drvdata(const struct platform_device *pdev) 212 { 213 return dev_get_drvdata(&pdev->dev); 214 } 215 216 static inline void platform_set_drvdata(struct platform_device *pdev, 217 void *data) 218 { 219 dev_set_drvdata(&pdev->dev, data); 220 } 221 222 /* module_platform_driver() - Helper macro for drivers that don't do 223 * anything special in module init/exit. This eliminates a lot of 224 * boilerplate. Each module may only use this macro once, and 225 * calling it replaces module_init() and module_exit() 226 */ 227 #define module_platform_driver(__platform_driver) \ 228 module_driver(__platform_driver, platform_driver_register, \ 229 platform_driver_unregister) 230 231 /* builtin_platform_driver() - Helper macro for builtin drivers that 232 * don't do anything special in driver init. This eliminates some 233 * boilerplate. Each driver may only use this macro once, and 234 * calling it replaces device_initcall(). Note this is meant to be 235 * a parallel of module_platform_driver() above, but w/o _exit stuff. 236 */ 237 #define builtin_platform_driver(__platform_driver) \ 238 builtin_driver(__platform_driver, platform_driver_register) 239 240 /* module_platform_driver_probe() - Helper macro for drivers that don't do 241 * anything special in module init/exit. This eliminates a lot of 242 * boilerplate. 
Each module may only use this macro once, and 243 * calling it replaces module_init() and module_exit() 244 */ 245 #define module_platform_driver_probe(__platform_driver, __platform_probe) \ 246 static int __init __platform_driver##_init(void) \ 247 { \ 248 return platform_driver_probe(&(__platform_driver), \ 249 __platform_probe); \ 250 } \ 251 module_init(__platform_driver##_init); \ 252 static void __exit __platform_driver##_exit(void) \ 253 { \ 254 platform_driver_unregister(&(__platform_driver)); \ 255 } \ 256 module_exit(__platform_driver##_exit); 257 258 /* builtin_platform_driver_probe() - Helper macro for drivers that don't do 259 * anything special in device init. This eliminates some boilerplate. Each 260 * driver may only use this macro once, and using it replaces device_initcall. 261 * This is meant to be a parallel of module_platform_driver_probe above, but 262 * without the __exit parts. 263 */ 264 #define builtin_platform_driver_probe(__platform_driver, __platform_probe) \ 265 static int __init __platform_driver##_init(void) \ 266 { \ 267 return platform_driver_probe(&(__platform_driver), \ 268 __platform_probe); \ 269 } \ 270 device_initcall(__platform_driver##_init); \ 271 272 #define platform_create_bundle(driver, probe, res, n_res, data, size) \ 273 __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE) 274 extern struct platform_device *__platform_create_bundle( 275 struct platform_driver *driver, int (*probe)(struct platform_device *), 276 struct resource *res, unsigned int n_res, 277 const void *data, size_t size, struct module *module); 278 279 int __platform_register_drivers(struct platform_driver * const *drivers, 280 unsigned int count, struct module *owner); 281 void platform_unregister_drivers(struct platform_driver * const *drivers, 282 unsigned int count); 283 284 #define platform_register_drivers(drivers, count) \ 285 __platform_register_drivers(drivers, count, THIS_MODULE) 286 287 /* early platform driver interface */ 288 struct early_platform_driver { 289 const char *class_str; 290 struct platform_driver *pdrv; 291 struct list_head list; 292 int requested_id; 293 char *buffer; 294 int bufsize; 295 }; 296 297 #define EARLY_PLATFORM_ID_UNSET -2 298 #define EARLY_PLATFORM_ID_ERROR -3 299 300 extern int early_platform_driver_register(struct early_platform_driver *epdrv, 301 char *buf); 302 extern void early_platform_add_devices(struct platform_device **devs, int num); 303 304 static inline int is_early_platform_device(struct platform_device *pdev) 305 { 306 return !pdev->dev.driver; 307 } 308 309 extern void early_platform_driver_register_all(char *class_str); 310 extern int early_platform_driver_probe(char *class_str, 311 int nr_probe, int user_only); 312 extern void early_platform_cleanup(void); 313 314 #define early_platform_init(class_string, platdrv) \ 315 early_platform_init_buffer(class_string, platdrv, NULL, 0) 316 317 #ifndef MODULE 318 #define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \ 319 static __initdata struct early_platform_driver early_driver = { \ 320 .class_str = class_string, \ 321 .buffer = buf, \ 322 .bufsize = bufsiz, \ 323 .pdrv = platdrv, \ 324 .requested_id = EARLY_PLATFORM_ID_UNSET, \ 325 }; \ 326 static int __init early_platform_driver_setup_func(char *buffer) \ 327 { \ 328 return early_platform_driver_register(&early_driver, buffer); \ 329 } \ 330 early_param(class_string, early_platform_driver_setup_func) 331 #else /* MODULE */ 332 #define early_platform_init_buffer(class_string, platdrv, 
buf, bufsiz) \ 333 static inline char *early_platform_driver_setup_func(void) \ 334 { \ 335 return bufsiz ? buf : NULL; \ 336 } 337 #endif /* MODULE */ 338 339 #ifdef CONFIG_SUSPEND 340 extern int platform_pm_suspend(struct device *dev); 341 extern int platform_pm_resume(struct device *dev); 342 #else 343 #define platform_pm_suspend NULL 344 #define platform_pm_resume NULL 345 #endif 346 347 #ifdef CONFIG_HIBERNATE_CALLBACKS 348 extern int platform_pm_freeze(struct device *dev); 349 extern int platform_pm_thaw(struct device *dev); 350 extern int platform_pm_poweroff(struct device *dev); 351 extern int platform_pm_restore(struct device *dev); 352 #else 353 #define platform_pm_freeze NULL 354 #define platform_pm_thaw NULL 355 #define platform_pm_poweroff NULL 356 #define platform_pm_restore NULL 357 #endif 358 359 #ifdef CONFIG_PM_SLEEP 360 #define USE_PLATFORM_PM_SLEEP_OPS \ 361 .suspend = platform_pm_suspend, \ 362 .resume = platform_pm_resume, \ 363 .freeze = platform_pm_freeze, \ 364 .thaw = platform_pm_thaw, \ 365 .poweroff = platform_pm_poweroff, \ 366 .restore = platform_pm_restore, 367 #else 368 #define USE_PLATFORM_PM_SLEEP_OPS 369 #endif 370 371 #endif /* _PLATFORM_DEVICE_H_ */
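
For reference, here is a minimal platform driver sketch built on the interface declared above (platform_get_resource(), platform_get_irq(), platform_set_drvdata()/platform_get_drvdata() and module_platform_driver()). All foo_* identifiers and the "vendor,foo" compatible string are hypothetical placeholders; this is not the checked i2c-axxia driver.

/*
 * Hedged sketch of a platform driver using the API above.
 * All foo_* names and "vendor,foo" are made-up placeholders.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/slab.h>

struct foo_priv {
	void __iomem *base;
	int irq;
};

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* First MEM resource: the device's register window. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* First interrupt line; a negative value means "not available". */
	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	/* Make the private data reachable from remove() and PM callbacks. */
	platform_set_drvdata(pdev, priv);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	(void)priv;	/* nothing to undo in this sketch */
	return 0;
}

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo" },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct platform_driver foo_driver = {
	.probe  = foo_probe,
	.remove = foo_remove,
	.driver = {
		.name           = "foo",
		.of_match_table = foo_of_match,
	},
};

/* Expands to the module_init()/module_exit() boilerplate described above. */
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL v2");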

Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.

Note that this may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there is an actual error in your driver.

The Error trace column contains the path on which the given rule is violated. You can expand or collapse whole entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and expand or collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking them opens the corresponding lines in the source code.

The Source code column contains the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking it opens the corresponding file's content.

Kernel: linux-4.8-rc1.tar.xz
Module: drivers/i2c/busses/i2c-axxia.ko
Rule: 320_7a
Verifier: CPAchecker
Verdict: Bug
Status: Fixed
Creation time: 2017-04-22 22:11:52
Problem description: L0249

Comment

Reported: 16 Sep 2016

[Back to top]