Bug

Error # 160

Error trace
__CPAchecker_initialize()
{
20 typedef unsigned char __u8;
23 typedef unsigned short __u16;
25 typedef int __s32;
26 typedef unsigned int __u32;
29 typedef long long __s64;
30 typedef unsigned long long __u64;
15 typedef signed char s8;
16 typedef unsigned char u8;
18 typedef short s16;
19 typedef unsigned short u16;
21 typedef int s32;
22 typedef unsigned int u32;
24 typedef long long s64;
25 typedef unsigned long long u64;
14 typedef long __kernel_long_t;
15 typedef unsigned long __kernel_ulong_t;
27 typedef int __kernel_pid_t;
48 typedef unsigned int __kernel_uid32_t;
49 typedef unsigned int __kernel_gid32_t;
71 typedef __kernel_ulong_t __kernel_size_t;
72 typedef __kernel_long_t __kernel_ssize_t;
87 typedef long long __kernel_loff_t;
88 typedef __kernel_long_t __kernel_time_t;
89 typedef __kernel_long_t __kernel_clock_t;
90 typedef int __kernel_timer_t;
91 typedef int __kernel_clockid_t;
280 struct kernel_symbol { unsigned long value; const char *name; } ;
34 struct module ;
12 typedef __u32 __kernel_dev_t;
15 typedef __kernel_dev_t dev_t;
18 typedef unsigned short umode_t;
21 typedef __kernel_pid_t pid_t;
26 typedef __kernel_clockid_t clockid_t;
29 typedef _Bool bool;
31 typedef __kernel_uid32_t uid_t;
32 typedef __kernel_gid32_t gid_t;
45 typedef __kernel_loff_t loff_t;
54 typedef __kernel_size_t size_t;
59 typedef __kernel_ssize_t ssize_t;
69 typedef __kernel_time_t time_t;
102 typedef __s32 int32_t;
108 typedef __u32 uint32_t;
133 typedef unsigned long sector_t;
134 typedef unsigned long blkcnt_t;
152 typedef u64 dma_addr_t;
157 typedef unsigned int gfp_t;
158 typedef unsigned int fmode_t;
161 typedef u64 phys_addr_t;
166 typedef phys_addr_t resource_size_t;
176 struct __anonstruct_atomic_t_6 { int counter; } ;
176 typedef struct __anonstruct_atomic_t_6 atomic_t;
181 struct __anonstruct_atomic64_t_7 { long counter; } ;
181 typedef struct __anonstruct_atomic64_t_7 atomic64_t;
182 struct list_head { struct list_head *next; struct list_head *prev; } ;
187 struct hlist_node ;
187 struct hlist_head { struct hlist_node *first; } ;
191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ;
202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ;
115 typedef void (*ctor_fn_t)();
58 struct device ;
450 struct file_operations ;
462 struct completion ;
463 struct pt_regs ;
557 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ;
114 struct timespec ;
115 struct compat_timespec ;
116 struct thread_info { unsigned long flags; } ;
20 struct __anonstruct_futex_25 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ;
20 struct __anonstruct_nanosleep_26 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ;
20 struct pollfd ;
20 struct __anonstruct_poll_27 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ;
20 union __anonunion____missing_field_name_24 { struct __anonstruct_futex_25 futex; struct __anonstruct_nanosleep_26 nanosleep; struct __anonstruct_poll_27 poll; } ;
20 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_24 __annonCompField4; } ;
39 struct page ;
26 struct task_struct ;
27 struct mm_struct ;
288 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ;
66 struct __anonstruct____missing_field_name_30 { unsigned int a; unsigned int b; } ;
66 struct __anonstruct____missing_field_name_31 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ;
66 union __anonunion____missing_field_name_29 { struct __anonstruct____missing_field_name_30 __annonCompField5; struct __anonstruct____missing_field_name_31 __annonCompField6; } ;
66 struct desc_struct { union __anonunion____missing_field_name_29 __annonCompField7; } ;
13 typedef unsigned long pteval_t;
14 typedef unsigned long pmdval_t;
16 typedef unsigned long pgdval_t;
17 typedef unsigned long pgprotval_t;
19 struct __anonstruct_pte_t_32 { pteval_t pte; } ;
19 typedef struct __anonstruct_pte_t_32 pte_t;
21 struct pgprot { pgprotval_t pgprot; } ;
256 typedef struct pgprot pgprot_t;
258 struct __anonstruct_pgd_t_33 { pgdval_t pgd; } ;
258 typedef struct __anonstruct_pgd_t_33 pgd_t;
297 struct __anonstruct_pmd_t_35 { pmdval_t pmd; } ;
297 typedef struct __anonstruct_pmd_t_35 pmd_t;
423 typedef struct page *pgtable_t;
434 struct file ;
445 struct seq_file ;
481 struct thread_struct ;
483 struct cpumask ;
20 struct qspinlock { atomic_t val; } ;
33 typedef struct qspinlock arch_spinlock_t;
34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ;
14 typedef struct qrwlock arch_rwlock_t;
247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ;
83 struct static_key { atomic_t enabled; } ;
23 typedef atomic64_t atomic_long_t;
359 struct cpumask { unsigned long bits[128U]; } ;
15 typedef struct cpumask cpumask_t;
654 typedef struct cpumask *cpumask_var_t;
22 struct tracepoint_func { void *func; void *data; int prio; } ;
28 struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ;
246 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ;
26 struct __anonstruct____missing_field_name_59 { u64 rip; u64 rdp; } ;
26 struct __anonstruct____missing_field_name_60 { u32 fip; u32 fcs; u32 foo; u32 fos; } ;
26 union __anonunion____missing_field_name_58 { struct __anonstruct____missing_field_name_59 __annonCompField13; struct __anonstruct____missing_field_name_60 __annonCompField14; } ;
26 union __anonunion____missing_field_name_61 { u32 padding1[12U]; u32 sw_reserved[12U]; } ;
26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_58 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_61 __annonCompField16; } ;
66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ;
227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ;
233 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ;
254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ;
271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; union fpregs_state state; } ;
169 struct seq_operations ;
372 struct perf_event ;
377 struct __anonstruct_mm_segment_t_73 { unsigned long seg; } ;
377 typedef struct __anonstruct_mm_segment_t_73 mm_segment_t;
378 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; u32 status; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ;
33 struct lockdep_map ;
55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ;
28 struct lockdep_subclass_key { char __one_byte; } ;
53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ;
59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ;
144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ;
207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ;
572 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
32 typedef struct raw_spinlock raw_spinlock_t;
33 struct __anonstruct____missing_field_name_75 { u8 __padding[24U]; struct lockdep_map dep_map; } ;
33 union __anonunion____missing_field_name_74 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_75 __annonCompField19; } ;
33 struct spinlock { union __anonunion____missing_field_name_74 __annonCompField20; } ;
76 typedef struct spinlock spinlock_t;
23 struct __anonstruct_rwlock_t_76 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
23 typedef struct __anonstruct_rwlock_t_76 rwlock_t;
416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ;
52 typedef struct seqcount seqcount_t;
407 struct __anonstruct_seqlock_t_91 { struct seqcount seqcount; spinlock_t lock; } ;
407 typedef struct __anonstruct_seqlock_t_91 seqlock_t;
601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ;
7 typedef __s64 time64_t;
83 struct user_namespace ;
22 struct __anonstruct_kuid_t_92 { uid_t val; } ;
22 typedef struct __anonstruct_kuid_t_92 kuid_t;
27 struct __anonstruct_kgid_t_93 { gid_t val; } ;
27 typedef struct __anonstruct_kgid_t_93 kgid_t;
139 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ;
36 struct vm_area_struct ;
38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ;
43 typedef struct __wait_queue_head wait_queue_head_t;
97 struct __anonstruct_nodemask_t_94 { unsigned long bits[16U]; } ;
97 typedef struct __anonstruct_nodemask_t_94 nodemask_t;
249 typedef unsigned int isolate_mode_t;
13 struct optimistic_spin_queue { atomic_t tail; } ;
39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ;
67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ;
177 struct rw_semaphore ;
178 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ;
178 struct completion { unsigned int done; wait_queue_head_t wait; } ;
450 union ktime { s64 tv64; } ;
41 typedef union ktime ktime_t;
1145 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ;
254 struct hrtimer ;
255 enum hrtimer_restart ;
256 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ;
41 struct rb_root { struct rb_node *rb_node; } ;
835 struct nsproxy ;
278 struct workqueue_struct ;
279 struct work_struct ;
54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ;
107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ;
217 struct resource ;
66 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; unsigned long desc; struct resource *parent; struct resource *sibling; struct resource *child; } ;
58 struct pm_message { int event; } ;
64 typedef struct pm_message pm_message_t;
65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ;
320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ;
327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ;
335 struct wakeup_source ;
336 struct wake_irq ;
337 struct pm_domain_data ;
338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ;
556 struct dev_pm_qos ;
556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ;
616 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ;
34 struct ldt_struct ;
34 struct vdso_image ;
34 struct __anonstruct_mm_context_t_165 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; u16 pkey_allocation_map; s16 execute_only_pkey; } ;
34 typedef struct __anonstruct_mm_context_t_165 mm_context_t;
1290 struct llist_node ;
64 struct llist_node { struct llist_node *next; } ;
37 struct cred ;
19 struct inode ;
58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ;
66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ;
73 struct __anonstruct____missing_field_name_211 { struct arch_uprobe_task autask; unsigned long vaddr; } ;
73 struct __anonstruct____missing_field_name_212 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ;
73 union __anonunion____missing_field_name_210 { struct __anonstruct____missing_field_name_211 __annonCompField35; struct __anonstruct____missing_field_name_212 __annonCompField36; } ;
73 struct uprobe ;
73 struct return_instance ;
73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_210 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ;
94 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ;
110 struct xol_area ;
111 struct uprobes_state { struct xol_area *xol_area; } ;
150 struct address_space ;
151 struct mem_cgroup ;
152 union __anonunion____missing_field_name_213 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ;
152 union __anonunion____missing_field_name_214 { unsigned long index; void *freelist; } ;
152 struct __anonstruct____missing_field_name_218 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ;
152 union __anonunion____missing_field_name_217 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_218 __annonCompField40; int units; } ;
152 struct __anonstruct____missing_field_name_216 { union __anonunion____missing_field_name_217 __annonCompField41; atomic_t _refcount; } ;
152 union __anonunion____missing_field_name_215 { unsigned long counters; struct __anonstruct____missing_field_name_216 __annonCompField42; } ;
152 struct dev_pagemap ;
152 struct __anonstruct____missing_field_name_220 { struct page *next; int pages; int pobjects; } ;
152 struct __anonstruct____missing_field_name_221 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ;
152 struct __anonstruct____missing_field_name_222 { unsigned long __pad; pgtable_t pmd_huge_pte; } ;
152 union __anonunion____missing_field_name_219 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_220 __annonCompField44; struct callback_head callback_head; struct __anonstruct____missing_field_name_221 __annonCompField45; struct __anonstruct____missing_field_name_222 __annonCompField46; } ;
152 struct kmem_cache ;
152 union __anonunion____missing_field_name_223 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ;
152 struct page { unsigned long flags; union __anonunion____missing_field_name_213 __annonCompField38; union __anonunion____missing_field_name_214 __annonCompField39; union __anonunion____missing_field_name_215 __annonCompField43; union __anonunion____missing_field_name_219 __annonCompField47; union __anonunion____missing_field_name_223 __annonCompField48; struct mem_cgroup *mem_cgroup; } ;
197 struct page_frag { struct page *page; __u32 offset; __u32 size; } ;
282 struct userfaultfd_ctx ;
282 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ;
289 struct __anonstruct_shared_224 { struct rb_node rb; unsigned long rb_subtree_last; } ;
289 struct anon_vma ;
289 struct vm_operations_struct ;
289 struct mempolicy ;
289 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_224 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;
362 struct core_thread { struct task_struct *task; struct core_thread *next; } ;
367 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ;
381 struct task_rss_stat { int events; int count[4U]; } ;
389 struct mm_rss_stat { atomic_long_t count[4U]; } ;
394 struct kioctx_table ;
395 struct linux_binfmt ;
395 struct mmu_notifier_mm ;
395 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ;
563 struct vm_fault ;
617 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ;
15 typedef __u64 Elf64_Addr;
16 typedef __u16 Elf64_Half;
18 typedef __u64 Elf64_Off;
20 typedef __u32 Elf64_Word;
21 typedef __u64 Elf64_Xword;
190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ;
198 typedef struct elf64_sym Elf64_Sym;
219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ;
235 typedef struct elf64_hdr Elf64_Ehdr;
314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ;
326 typedef struct elf64_shdr Elf64_Shdr;
53 union __anonunion____missing_field_name_229 { unsigned long bitmap[4U]; struct callback_head callback_head; } ;
53 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_229 __annonCompField49; } ;
41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ;
124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ;
167 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ;
199 struct dentry ;
200 struct iattr ;
201 struct super_block ;
202 struct file_system_type ;
203 struct kernfs_open_node ;
204 struct kernfs_iattrs ;
227 struct kernfs_root ;
227 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ;
85 struct kernfs_node ;
85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ;
89 struct kernfs_ops ;
89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ;
96 union __anonunion____missing_field_name_234 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ;
96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_234 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ;
138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ;
157 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ;
173 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ;
191 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ;
511 struct sock ;
512 struct kobject ;
513 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ;
519 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ;
59 struct bin_attribute ;
60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ;
37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ;
92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;
165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;
530 struct kref { atomic_t refcount; } ;
52 struct kset ;
52 struct kobj_type ;
52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ;
115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ;
123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ;
131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;
148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ;
223 struct kernel_param ;
228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ;
62 struct kparam_string ;
62 struct kparam_array ;
62 union __anonunion____missing_field_name_237 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ;
62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_237 __annonCompField51; } ;
83 struct kparam_string { unsigned int maxlen; char *string; } ;
89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ;
470 struct exception_table_entry ;
24 struct latch_tree_node { struct rb_node node[2U]; } ;
211 struct mod_arch_specific { } ;
39 struct module_param_attrs ;
39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ;
50 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ;
277 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ;
284 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ;
291 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ;
307 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ;
321 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ;
329 struct module_sect_attrs ;
329 struct module_notes_attrs ;
329 struct trace_event_call ;
329 struct trace_enum_map ;
329 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ;
799 struct clk ;
15 struct klist_node ;
37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ;
93 struct hlist_bl_node ;
93 struct hlist_bl_head { struct hlist_bl_node *first; } ;
36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ;
114 struct __anonstruct____missing_field_name_287 { spinlock_t lock; int count; } ;
114 union __anonunion____missing_field_name_286 { struct __anonstruct____missing_field_name_287 __annonCompField52; } ;
114 struct lockref { union __anonunion____missing_field_name_286 __annonCompField53; } ;
77 struct path ;
78 struct vfsmount ;
79 struct __anonstruct____missing_field_name_289 { u32 hash; u32 len; } ;
79 union __anonunion____missing_field_name_288 { struct __anonstruct____missing_field_name_289 __annonCompField54; u64 hash_len; } ;
79 struct qstr { union __anonunion____missing_field_name_288 __annonCompField55; const unsigned char *name; } ;
65 struct dentry_operations ;
65 union __anonunion____missing_field_name_290 { struct list_head d_lru; wait_queue_head_t *d_wait; } ;
65 union __anonunion_d_u_291 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ;
65 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_290 __annonCompField56; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_291 d_u; } ;
121 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ;
592 struct path { struct vfsmount *mnt; struct dentry *dentry; } ;
19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ;
27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ;
80 struct list_lru_one { struct list_head list; long nr_items; } ;
32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ;
37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ;
47 struct list_lru { struct list_lru_node *node; struct list_head list; } ;
63 struct __anonstruct____missing_field_name_293 { struct radix_tree_node *parent; void *private_data; } ;
63 union __anonunion____missing_field_name_292 { struct __anonstruct____missing_field_name_293 __annonCompField57; struct callback_head callback_head; } ;
63 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned int count; union __anonunion____missing_field_name_292 __annonCompField58; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ;
106 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ;
531 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ;
538 struct pid_namespace ;
538 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ;
56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ;
68 struct pid_link { struct hlist_node node; struct pid *pid; } ;
22 struct kernel_cap_struct { __u32 cap[2U]; } ;
25 typedef struct kernel_cap_struct kernel_cap_t;
45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ;
38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ;
44 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ;
50 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ;
66 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *read_count; struct rw_semaphore rw_sem; wait_queue_head_t writer; int readers_block; } ;
87 struct block_device ;
88 struct io_context ;
89 struct cgroup_subsys_state ;
273 struct delayed_call { void (*fn)(void *); void *arg; } ;
264 struct backing_dev_info ;
265 struct bdi_writeback ;
266 struct export_operations ;
269 struct kiocb ;
270 struct pipe_inode_info ;
271 struct poll_table_struct ;
272 struct kstatfs ;
273 struct swap_info_struct ;
274 struct iov_iter ;
275 struct fscrypt_info ;
276 struct fscrypt_operations ;
76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ;
213 struct dquot ;
214 struct kqid ;
19 typedef __kernel_uid32_t projid_t;
23 struct __anonstruct_kprojid_t_302 { projid_t val; } ;
23 typedef struct __anonstruct_kprojid_t_302 kprojid_t;
181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ;
66 typedef long long qsize_t;
67 union __anonunion____missing_field_name_303 { kuid_t uid; kgid_t gid; kprojid_t projid; } ;
67 struct kqid { union __anonunion____missing_field_name_303 __annonCompField60; enum quota_type type; } ;
194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ;
216 struct quota_format_type ;
217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ;
282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ;
309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ;
361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ;
407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ;
418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ;
431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ;
447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ;
511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ;
541 struct writeback_control ;
542 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ;
368 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ;
427 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; gfp_t gfp_mask; struct list_head private_list; void *private_data; } ;
449 struct request_queue ;
450 struct hd_struct ;
450 struct gendisk ;
450 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ;
565 struct posix_acl ;
592 struct inode_operations ;
592 union __anonunion____missing_field_name_308 { const unsigned int i_nlink; unsigned int __i_nlink; } ;
592 union __anonunion____missing_field_name_309 { struct hlist_head i_dentry; struct callback_head i_rcu; } ;
592 struct file_lock_context ;
592 struct cdev ;
592 union __anonunion____missing_field_name_310 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ;
592 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_308 __annonCompField61; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_309 __annonCompField62; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_310 __annonCompField63; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ;
847 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ;
855 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ;
878 union __anonunion_f_u_311 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ;
878 struct file { union __anonunion_f_u_311 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ;
963 typedef void *fl_owner_t;
964 struct file_lock ;
965 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ;
971 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ;
998 struct nlm_lockowner ;
999 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ;
14 struct nfs4_lock_state ;
15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ;
19 struct fasync_struct ;
19 struct __anonstruct_afs_313 { struct list_head link; int state; } ;
19 union __anonunion_fl_u_312 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_313 afs; } ;
19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_312 fl_u; } ;
1051 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ;
1118 struct files_struct ;
1271 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ;
1306 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ;
1336 struct super_operations ;
1336 struct xattr_handler ;
1336 struct mtd_info ;
1336 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ;
1620 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ;
1633 struct dir_context ;
1658 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ;
1665 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ;
1734 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ;
1784 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;
2027 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
3211 struct assoc_array_ptr ;
3211 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct user_struct ;
37 struct signal_struct ;
38 struct key_type ;
42 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
91 union key_payload { void *rcu_data0; void *data[4U]; } ;
128 union __anonunion____missing_field_name_314 { struct list_head graveyard_link; struct rb_node serial_node; } ;
128 struct key_user ;
128 union __anonunion____missing_field_name_315 { time_t expiry; time_t revoked_at; } ;
128 struct __anonstruct____missing_field_name_317 { struct key_type *type; char *description; } ;
128 union __anonunion____missing_field_name_316 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_317 __annonCompField66; } ;
128 struct __anonstruct____missing_field_name_319 { struct list_head name_link; struct assoc_array keys; } ;
128 union __anonunion____missing_field_name_318 { union key_payload payload; struct __anonstruct____missing_field_name_319 __annonCompField68; int reject_error; } ;
128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_314 __annonCompField64; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_315 __annonCompField65; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_316 __annonCompField67; union __anonunion____missing_field_name_318 __annonCompField69; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ;
377 struct audit_context ;
27 struct group_info { atomic_t usage; int ngroups; kgid_t gid[0U]; } ;
85 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
368 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ;
30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
222 struct pinctrl ;
223 struct pinctrl_state ;
194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
4 typedef unsigned long cputime_t;
26 struct sem_undo_list ;
26 struct sysv_sem { struct sem_undo_list *undo_list; } ;
26 struct sysv_shm { struct list_head shm_clist; } ;
24 struct __anonstruct_sigset_t_320 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_320 sigset_t;
25 struct siginfo ;
17 typedef void __signalfn_t(int);
18 typedef __signalfn_t *__sighandler_t;
20 typedef void __restorefn_t();
21 typedef __restorefn_t *__sigrestore_t;
38 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_322 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_323 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_324 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_325 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__addr_bnd_328 { void *_lower; void *_upper; } ;
11 union __anonunion____missing_field_name_327 { struct __anonstruct__addr_bnd_328 _addr_bnd; __u32 _pkey; } ;
11 struct __anonstruct__sigfault_326 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_327 __annonCompField70; } ;
11 struct __anonstruct__sigpoll_329 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_330 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_321 { int _pad[28U]; struct __anonstruct__kill_322 _kill; struct __anonstruct__timer_323 _timer; struct __anonstruct__rt_324 _rt; struct __anonstruct__sigchld_325 _sigchld; struct __anonstruct__sigfault_326 _sigfault; struct __anonstruct__sigpoll_329 _sigpoll; struct __anonstruct__sigsys_330 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_321 _sifields; } ;
118 typedef struct siginfo siginfo_t;
22 struct sigpending { struct list_head list; sigset_t signal; } ;
257 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ;
271 struct k_sigaction { struct sigaction sa; } ;
43 struct seccomp_filter ;
44 struct seccomp { int mode; struct seccomp_filter *filter; } ;
40 struct rt_mutex_waiter ;
41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ;
11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ;
12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ;
50 struct hrtimer_clock_base ;
51 struct hrtimer_cpu_base ;
60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ;
65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; } ;
125 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ;
158 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ;
12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ;
17 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
41 struct percpu_ref ;
55 typedef void percpu_ref_func_t(struct percpu_ref *);
68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ;
325 struct cgroup ;
326 struct cgroup_root ;
327 struct cgroup_subsys ;
328 struct cgroup_taskset ;
372 struct cgroup_file { struct kernfs_node *kn; } ;
90 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ;
141 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; } ;
221 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; int ancestor_ids[]; } ;
306 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ;
345 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ;
430 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ;
128 struct futex_pi_state ;
129 struct robust_list_head ;
130 struct bio_list ;
131 struct fs_struct ;
132 struct perf_event_context ;
133 struct blk_plug ;
134 struct nameidata ;
188 struct cfs_rq ;
189 struct task_group ;
495 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ;
539 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ;
547 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ;
554 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ;
579 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ;
595 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ;
617 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ;
662 struct autogroup ;
663 struct tty_struct ;
663 struct taskstats ;
663 struct tty_audit_buf ;
663 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mm_struct *oom_mm; struct mutex cred_guard_mutex; } ;
839 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
884 struct reclaim_state ;
885 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
900 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ;
957 struct wake_q_node { struct wake_q_node *next; } ;
1235 struct load_weight { unsigned long weight; u32 inv_weight; } ;
1243 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ;
1301 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
1336 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
1373 struct rt_rq ;
1373 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
1391 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ;
1455 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ;
1474 struct sched_class ;
1474 struct compat_robust_list_head ;
1474 struct numa_group ;
1474 struct kcov ;
1474 struct task_struct { struct thread_info thread_info; volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; atomic_t stack_refcount; struct thread_struct thread; } ;
76 struct dma_map_ops ;
76 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ;
21 struct pdev_archdata { } ;
24 struct device_private ;
25 struct device_driver ;
26 struct driver_private ;
27 struct class ;
28 struct subsys_private ;
29 struct bus_type ;
30 struct device_node ;
31 struct fwnode_handle ;
32 struct iommu_ops ;
33 struct iommu_group ;
34 struct iommu_fwspec ;
62 struct device_attribute ;
62 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
143 struct device_type ;
202 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ;
208 struct of_device_id ;
208 struct acpi_device_id ;
208 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
358 struct class_attribute ;
358 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
451 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
519 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
547 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
700 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
709 struct irq_domain ;
709 struct dma_coherent_mem ;
709 struct cma ;
709 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; struct iommu_fwspec *iommu_fwspec; bool offline_disabled; bool offline; } ;
865 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
13 typedef unsigned long kernel_ulong_t;
186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ;
229 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ;
484 struct platform_device_id { char name[20U]; kernel_ulong_t driver_data; } ;
674 struct mfd_cell ;
676 struct platform_device { const char *name; int id; bool id_auto; struct device dev; u32 num_resources; struct resource *resource; const struct platform_device_id *id_entry; char *driver_override; struct mfd_cell *mfd_cell; struct pdev_archdata archdata; } ;
352 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ;
16 typedef enum irqreturn irqreturn_t;
130 struct exception_table_entry { int insn; int fixup; int handler; } ;
716 struct uio_map ;
717 struct uio_mem { const char *name; phys_addr_t addr; resource_size_t size; int memtype; void *internal_addr; struct uio_map *map; } ;
43 struct uio_portio ;
44 struct uio_port { const char *name; unsigned long start; unsigned long size; int porttype; struct uio_portio *portio; } ;
63 struct uio_info ;
63 struct uio_device { struct module *owner; struct device *dev; int minor; atomic_t event; struct fasync_struct *async_queue; wait_queue_head_t wait; struct uio_info *info; struct kobject *map_dir; struct kobject *portio_dir; } ;
77 struct uio_info { struct uio_device *uio_dev; const char *name; const char *version; struct uio_mem mem[5U]; struct uio_port port[5U]; long irq; unsigned long irq_flags; void *priv; irqreturn_t (*handler)(int, struct uio_info *); int (*mmap)(struct uio_info *, struct vm_area_struct *); int (*open)(struct uio_info *, struct inode *); int (*release)(struct uio_info *, struct inode *); int (*irqcontrol)(struct uio_info *, s32 ); } ;
121 struct gen_pool ;
121 struct uio_pruss_pdata { u32 pintc_base; struct gen_pool *sram_pool; } ;
511 struct scatterlist ;
96 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ;
273 struct vm_fault { unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; void *virtual_address; struct page *cow_page; struct page *page; void *entry; } ;
308 struct fault_env { struct vm_area_struct *vma; unsigned long address; unsigned int flags; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ;
335 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct fault_env *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;
2450 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ;
21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ;
158 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); dma_addr_t (*map_resource)(struct device *, phys_addr_t , size_t , enum dma_data_direction , unsigned long); void (*unmap_resource)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ;
49 struct gen_pool { spinlock_t lock; struct list_head chunks; int min_alloc_order; unsigned long int (*algo)(unsigned long *, unsigned long, unsigned long, unsigned int, void *, struct gen_pool *); void *data; const char *name; } ;
42 struct uio_pruss_dev { struct uio_info *info; struct clk *pruss_clk; dma_addr_t sram_paddr; dma_addr_t ddr_paddr; void *prussio_vaddr; unsigned long sram_vaddr; void *ddr_vaddr; unsigned int hostirq_start; unsigned int pintc_base; struct gen_pool *sram_pool; } ;
1 long int __builtin_expect(long, long);
34 extern struct module __this_module;
419 char * kasprintf(gfp_t , const char *, ...);
3 bool ldv_is_err(const void *ptr);
6 long int ldv_ptr_err(const void *ptr);
32 long int PTR_ERR(const void *ptr);
41 bool IS_ERR(const void *ptr);
193 resource_size_t resource_size(const struct resource *res);
181 void * ioremap_nocache(resource_size_t , unsigned long);
192 void * ioremap(resource_size_t offset, unsigned long size);
197 void iounmap(volatile void *);
31 unsigned int ioread32(void *);
41 void iowrite32(u32 , void *);
11 void ldv_clk_disable_clk(struct clk *clk);
12 int ldv_clk_enable_clk();
14 int ldv_clk_enable_pruss_clk_of_uio_pruss_dev();
915 void * dev_get_drvdata(const struct device *dev);
920 void dev_set_drvdata(struct device *dev, void *data);
1049 void * dev_get_platdata(const struct device *dev);
1138 void dev_err(const struct device *, const char *, ...);
52 struct resource * platform_get_resource(struct platform_device *, unsigned int, unsigned int);
54 int platform_get_irq(struct platform_device *, unsigned int);
211 void * platform_get_drvdata(const struct platform_device *pdev);
216 void platform_set_drvdata(struct platform_device *pdev, void *data);
111 int __uio_register_device(struct module *, struct device *, struct uio_info *);
119 void uio_unregister_device(struct uio_info *);
229 struct clk * clk_get(struct device *, const char *);
264 int ldv_clk_enable_5(struct clk *clk);
268 int ldv_clk_enable_7(struct clk *clk);
280 void ldv_clk_disable_6(struct clk *clk);
296 void clk_put(struct clk *);
53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *);
28 extern struct dma_map_ops *dma_ops;
30 struct dma_map_ops * get_dma_ops(struct device *dev);
42 bool arch_dma_alloc_attrs(struct device **, gfp_t *);
450 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs);
491 void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag);
497 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle);
154 void kfree(const void *);
330 void * __kmalloc(size_t , gfp_t );
478 void * kmalloc(size_t size, gfp_t flags);
634 void * kzalloc(size_t size, gfp_t flags);
119 void * gen_pool_dma_alloc(struct gen_pool *, size_t , dma_addr_t *);
121 void gen_pool_free(struct gen_pool *, unsigned long, size_t );
35 int sram_pool_sz = 16384;
39 int extram_pool_sz = 262144;
77 irqreturn_t pruss_handler(int irq, struct uio_info *info);
96 void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev);
119 int pruss_probe(struct platform_device *pdev);
223 int pruss_remove(struct platform_device *dev);
262 void ldv_check_final_state();
265 void ldv_check_return_value(int);
268 void ldv_check_return_value_probe(int);
271 void ldv_initialize();
274 void ldv_handler_precall();
277 int nondet_int();
280 int LDV_IN_INTERRUPT = 0;
283 void ldv_main0_sequence_infinite_withcheck_stateful();
10 void ldv_error();
25 int ldv_undef_int();
14 void * ldv_err_ptr(long error);
28 bool ldv_is_err_or_null(const void *ptr);
9 int ldv_counter_clk = 0;
32 int ldv_counter_pruss_clk_of_uio_pruss_dev = 0;
35 void ldv_clk_disable_pruss_clk_of_uio_pruss_dev(struct clk *clk);
return ;
}
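
The ldv_counter_clk and ldv_counter_pruss_clk_of_uio_pruss_dev variables above implement LDV's model of the kernel clk API: a successful clk_enable() on a modelled clock sets the corresponding counter, clk_disable() clears it, and ldv_check_final_state() reports an error if any clock is still enabled once the module can no longer run. A minimal sketch of that instrumentation, reconstructed from the enable and final-state steps visible in the trace below (the disable body is an assumption, not quoted LDV source):

	int ldv_counter_pruss_clk_of_uio_pruss_dev = 0;

	/* Model of clk_enable(): may nondeterministically fail; on success the clock is marked enabled. */
	int ldv_clk_enable_pruss_clk_of_uio_pruss_dev(void)
	{
		int retval = ldv_undef_int();
		if (retval == 0)
			ldv_counter_pruss_clk_of_uio_pruss_dev = 1;
		return retval;
	}

	/* Model of clk_disable(): the clock is marked disabled again (assumed symmetric to enable). */
	void ldv_clk_disable_pruss_clk_of_uio_pruss_dev(struct clk *clk)
	{
		ldv_counter_pruss_clk_of_uio_pruss_dev = 0;
	}

	/* Run after the harness finishes: a still-enabled clock at this point is the reported bug. */
	void ldv_check_final_state(void)
	{
		if (ldv_counter_pruss_clk_of_uio_pruss_dev != 0)
			ldv_error();
	}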
-entry_point
{
285 struct platform_device *var_group1;
286 int res_pruss_probe_2;
287 int ldv_s_pruss_driver_platform_driver;
288 int tmp;
289 int tmp___0;
331 ldv_s_pruss_driver_platform_driver = 0;
321 LDV_IN_INTERRUPT = 1;
330 ldv_initialize() { /* Function call is skipped due to function is undefined */}
334 goto ldv_33505;
334 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
334 assume(tmp___0 != 0);
337 goto ldv_33504;
335 ldv_33504:;
338 tmp = nondet_int() { /* Function call is skipped due to function is undefined */}
338 switch (tmp);
339 assume(tmp == 0);
343 assume(ldv_s_pruss_driver_platform_driver == 0);
356 -pruss_probe(var_group1)
{
121 struct uio_info *p;
122 struct uio_pruss_dev *gdev;
123 struct resource *regs_prussio;
124 struct device *dev;
125 int ret;
126 int cnt;
127 int len;
128 struct uio_pruss_pdata *pdata;
129 void *tmp;
130 void *tmp___0;
131 void *tmp___1;
132 long tmp___2;
133 _Bool tmp___3;
134 void *tmp___4;
135 unsigned long long tmp___5;
136 int tmp___6;
137 char *tmp___7;
124 dev = &(pdev->dev);
125 ret = -19;
125 cnt = 0;
126 -dev_get_platdata((const struct device *)dev)
{
1051 void *__CPAchecker_TMP_0 = (void *)(dev->platform_data);
1051 return __CPAchecker_TMP_0;;
}
126 pdata = (struct uio_pruss_pdata *)tmp;
128 -kzalloc(72UL, 37748928U)
{
636 void *tmp;
636 -kmalloc(size, flags | 32768U)
{
480 void *tmp___2;
495 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}
495 return tmp___2;;
}
636 return tmp;;
}
128 gdev = (struct uio_pruss_dev *)tmp___0;
129 assume(!(((unsigned long)gdev) == ((unsigned long)((struct uio_pruss_dev *)0))));
132 -kzalloc(4224UL, 37748928U)
{
636 void *tmp;
636 -kmalloc(size, flags | 32768U)
{
480 void *tmp___2;
495 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */}
495 return tmp___2;;
}
636 return tmp;;
}
132 gdev->info = (struct uio_info *)tmp___1;
133 unsigned long __CPAchecker_TMP_0 = (unsigned long)(gdev->info);
133 assume(!(__CPAchecker_TMP_0 == ((unsigned long)((struct uio_info *)0))));
139 gdev->pruss_clk = clk_get(dev, "pruss") { /* Function call is skipped due to function is undefined */}
140 const void *__CPAchecker_TMP_1 = (const void *)(gdev->pruss_clk);
140 -IS_ERR(__CPAchecker_TMP_1)
{
30 _Bool tmp;
31 -ldv_is_err(ptr)
{
10 return ((unsigned long)ptr) > 2012UL;;
}
31 return tmp;;
}
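
clk_get() returns either a valid struct clk pointer or a small negative errno encoded in the pointer itself; IS_ERR() tests whether the pointer value falls into that reserved error window. LDV abstracts the window with the constant 2012UL seen above, while the real kernel convention from <linux/err.h> reserves the top MAX_ERRNO values of the address space, roughly:

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;	/* pack -errno into a pointer */
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;	/* recover the -errno */
	}

	static inline bool IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}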
140 assume(((int)tmp___3) == 0);
147 -ldv_clk_enable_7(gdev->pruss_clk)
{
54 int tmp;
55 -ldv_clk_enable_pruss_clk_of_uio_pruss_dev()
{
44 int retval;
45 int tmp;
44 tmp = ldv_undef_int() { /* Function call is skipped due to function is undefined */}
44 retval = tmp;
45 assume(retval == 0);
48 ldv_counter_pruss_clk_of_uio_pruss_dev = 1;
50 return retval;;
}
55 return tmp;;
}
150 regs_prussio = platform_get_resource(pdev, 512U, 0U) { /* Function call is skipped due to function is undefined */}
151 assume(!(((unsigned long)regs_prussio) == ((unsigned long)((struct resource *)0))));
156 assume(!((regs_prussio->start) == 0ULL));
161 unsigned long __CPAchecker_TMP_4 = (unsigned long)(pdata->sram_pool);
161 assume(__CPAchecker_TMP_4 != ((unsigned long)((struct gen_pool *)0)));
162 gdev->sram_pool = pdata->sram_pool;
163 tmp___4 = gen_pool_dma_alloc(gdev->sram_pool, (size_t )sram_pool_sz, &(gdev->sram_paddr)) { /* Function call is skipped due to function is undefined */}
163 gdev->sram_vaddr = (unsigned long)tmp___4;
166 assume(!((gdev->sram_vaddr) == 0UL));
172 -dma_alloc_coherent(dev, (size_t )extram_pool_sz, &(gdev->ddr_paddr), 37748929U)
{
494 void *tmp;
494 -dma_alloc_attrs(dev, size, dma_handle, flag, 0UL)
{
453 struct dma_map_ops *ops;
454 struct dma_map_ops *tmp;
455 void *cpu_addr;
456 long tmp___0;
457 _Bool tmp___1;
458 int tmp___2;
454 -get_dma_ops(dev)
{
32 long tmp;
35 tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */}
35 assume(tmp != 0L);
36 return dma_ops;;
}
454 ops = tmp;
457 tmp___0 = __builtin_expect(((unsigned long)ops) == ((unsigned long)((struct dma_map_ops *)0)), 0L) { /* Function call is skipped due to function is undefined */}
457 assume(!(tmp___0 != 0L));
462 tmp___1 = arch_dma_alloc_attrs(&dev, &flag) { /* Function call is skipped due to function is undefined */}
462 assume(!(tmp___1 == 0));
462 tmp___2 = 0;
462 assume(tmp___2 == 0);
464 unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->alloc);
464 assume(!(__CPAchecker_TMP_0 == ((unsigned long)((void * (*)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long))0))));
467 cpu_addr = (*(ops->alloc))(dev, size, dma_handle, flag, attrs);
468 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr) { /* Function call is skipped due to function is undefined */}
469 return cpu_addr;;
}
494 return tmp;;
}
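
dma_alloc_coherent() resolves the device's dma_map_ops and dispatches to ops->alloc, as the inlined body above shows; whatever it hands out must later be released with dma_free_coherent() using the same device, size and DMA handle, which is what pruss_cleanup() does on the remove path. A minimal usage sketch mirroring the driver (the flag literal 37748929U in the trace decodes to GFP_KERNEL | GFP_DMA):

	#include <linux/dma-mapping.h>

	/* Allocate the external DDR window once at probe time... */
	gdev->ddr_vaddr = dma_alloc_coherent(dev, extram_pool_sz,
					     &gdev->ddr_paddr,
					     GFP_KERNEL | GFP_DMA);
	if (!gdev->ddr_vaddr)
		goto out_free;

	/* ...and free it with the very same triple on cleanup. */
	dma_free_coherent(dev, extram_pool_sz, gdev->ddr_vaddr,
			  gdev->ddr_paddr);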
174 unsigned long __CPAchecker_TMP_5 = (unsigned long)(gdev->ddr_vaddr);
174 assume(!(__CPAchecker_TMP_5 == ((unsigned long)((void *)0))));
179 -resource_size((const struct resource *)regs_prussio)
{
195 unsigned long long __CPAchecker_TMP_0 = (unsigned long long)(res->end);
195 unsigned long long __CPAchecker_TMP_1 = (unsigned long long)(res->start);
195 return (__CPAchecker_TMP_0 - __CPAchecker_TMP_1) + 1ULL;;
}
179 len = (int)tmp___5;
180 -ioremap(regs_prussio->start, (unsigned long)len)
{
194 void *tmp;
194 tmp = ioremap_nocache(offset, size) { /* Function call is skipped due to function is undefined */}
194 return tmp;;
}
181 unsigned long __CPAchecker_TMP_6 = (unsigned long)(gdev->prussio_vaddr);
181 assume(!(__CPAchecker_TMP_6 == ((unsigned long)((void *)0))));
186 gdev->pintc_base = pdata->pintc_base;
187 tmp___6 = platform_get_irq(pdev, 0U) { /* Function call is skipped due to function is undefined */}
187 gdev->hostirq_start = (unsigned int)tmp___6;
189 cnt = 0;
189 p = gdev->info;
189 goto ldv_33453;
189 assume(cnt <= 7);
191 goto ldv_33452;
190 ldv_33452:;
190 ((p->mem)[0]).addr = regs_prussio->start;
191 -resource_size((const struct resource *)regs_prussio)
{
195 unsigned long long __CPAchecker_TMP_0 = (unsigned long long)(res->end);
195 unsigned long long __CPAchecker_TMP_1 = (unsigned long long)(res->start);
195 return (__CPAchecker_TMP_0 - __CPAchecker_TMP_1) + 1ULL;;
}
192 ((p->mem)[0]).memtype = 1;
194 ((p->mem)[1]).addr = gdev->sram_paddr;
195 ((p->mem)[1]).size = (resource_size_t )sram_pool_sz;
196 ((p->mem)[1]).memtype = 1;
198 ((p->mem)[2]).addr = gdev->ddr_paddr;
199 ((p->mem)[2]).size = (resource_size_t )extram_pool_sz;
200 ((p->mem)[2]).memtype = 1;
202 tmp___7 = kasprintf(37748928U, "pruss_evt%d", cnt) { /* Function call is skipped due to function is undefined */}
202 p->name = (const char *)tmp___7;
203 p->version = "1.0";
206 p->irq = (long)((gdev->hostirq_start) + ((unsigned int)cnt));
207 p->handler = &pruss_handler;
208 p->priv = (void *)gdev;
210 ret = __uio_register_device(&__this_module, dev, p) { /* Function call is skipped due to function is undefined */}
211 assume(!(ret < 0));
189 cnt = cnt + 1;
189 p = p + 1;
190 ldv_33453:;
/* The seven remaining iterations (cnt = 1..7) repeat the loop body above verbatim and are omitted here. */
189 assume(!(cnt <= 7));
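
All eight iterations the verifier unrolled above execute the same body. Folded back into a loop, the registration code being traced looks roughly like this (a sketch assembled from the trace, not verbatim uio_pruss.c; MAX_PRUSS_EVT names the bound 8 and UIO_MEM_PHYS the memtype value 1):

	for (cnt = 0, p = gdev->info; cnt < MAX_PRUSS_EVT; cnt++, p++) {
		p->mem[0].addr = regs_prussio->start;
		p->mem[0].size = resource_size(regs_prussio);
		p->mem[0].memtype = UIO_MEM_PHYS;

		p->mem[1].addr = gdev->sram_paddr;
		p->mem[1].size = sram_pool_sz;
		p->mem[1].memtype = UIO_MEM_PHYS;

		p->mem[2].addr = gdev->ddr_paddr;
		p->mem[2].size = extram_pool_sz;
		p->mem[2].memtype = UIO_MEM_PHYS;

		p->name = kasprintf(GFP_KERNEL, "pruss_evt%d", cnt);
		p->version = "1.0";

		/* one UIO device per PRUSS host interrupt */
		p->irq = gdev->hostirq_start + cnt;
		p->handler = pruss_handler;
		p->priv = gdev;

		ret = uio_register_device(dev, p);
		if (ret < 0)
			goto out_free;
	}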
215 -platform_set_drvdata(pdev, (void *)gdev)
{
219 -dev_set_drvdata(&(pdev->dev), data)
{
922 dev->driver_data = data;
923 return ;;
}
220 return ;;
}
216 return 0;;
}
357 ldv_check_return_value(res_pruss_probe_2) { /* Function call is skipped due to function is undefined */}
358 ldv_check_return_value_probe(res_pruss_probe_2) { /* Function call is skipped due to function is undefined */}
359 assume(!(res_pruss_probe_2 != 0));
361 ldv_s_pruss_driver_platform_driver = ldv_s_pruss_driver_platform_driver + 1;
367 goto ldv_33501;
395 ldv_33501:;
396 ldv_33505:;
334 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
334 assume(!(tmp___0 != 0));
334 assume(ldv_s_pruss_driver_platform_driver != 0);
337 goto ldv_33504;
335 ldv_33504:;
338 tmp = nondet_int() { /* Function call is skipped due to function is undefined */}
338 switch (tmp);
339 assume(!(tmp == 0));
368 assume(tmp == 1);
371 assume(ldv_s_pruss_driver_platform_driver == 1);
384 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
385 -pruss_remove(var_group1)
{
225 struct uio_pruss_dev *gdev;
226 void *tmp;
225 -platform_get_drvdata((const struct platform_device *)dev)
{
213 void *tmp;
213 -dev_get_drvdata(&(pdev->dev))
{
917 void *__CPAchecker_TMP_0 = (void *)(dev->driver_data);
917 return __CPAchecker_TMP_0;;
}
213 return tmp;;
}
225 gdev = (struct uio_pruss_dev *)tmp;
227 -pruss_cleanup(&(dev->dev), gdev)
{
98 int cnt;
99 struct uio_info *p;
99 p = gdev->info;
101 cnt = 0;
101 goto ldv_33438;
101 assume(cnt <= 7);
103 goto ldv_33437;
102 ldv_33437:;
102 uio_unregister_device(p) { /* Function call is skipped due to function is undefined */}
103 const void *__CPAchecker_TMP_0 = (const void *)(p->name);
103 kfree(__CPAchecker_TMP_0) { /* Function call is skipped due to function is undefined */}
101 cnt = cnt + 1;
101 p = p + 1;
102 ldv_33438:;
/* The seven remaining iterations (cnt = 1..7) repeat the loop body above verbatim and are omitted here. */
101 assume(!(cnt <= 7));
105 volatile void *__CPAchecker_TMP_1 = (volatile void *)(gdev->prussio_vaddr);
105 iounmap(__CPAchecker_TMP_1) { /* Function call is skipped because the function is undefined */ }
106 unsigned long __CPAchecker_TMP_2 = (unsigned long)(gdev->ddr_vaddr);
106 assume(__CPAchecker_TMP_2 != ((unsigned long)((void *)0)));
107 -dma_free_coherent(dev, (size_t)extram_pool_sz, gdev->ddr_vaddr, gdev->ddr_paddr)
{
500 return;
}
110 assume((gdev->sram_vaddr) != 0UL);
111 gen_pool_free(gdev->sram_pool, gdev->sram_vaddr, (size_t)sram_pool_sz) { /* Function call is skipped because the function is undefined */ }
114 const void *__CPAchecker_TMP_3 = (const void *)(gdev->info);
114 kfree(__CPAchecker_TMP_3) { /* Function call is skipped because the function is undefined */ }
115 clk_put(gdev->pruss_clk) { /* Function call is skipped because the function is undefined */ }
116 kfree((const void *)gdev) { /* Function call is skipped because the function is undefined */ }
117 return;
}
228 return 0;
}
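The eight unrolled iterations above are the verifier's expansion of the cleanup loop in pruss_cleanup() (lines 101-104 of the uio_pruss.c listing below), one iteration per registered UIO device:

    for (cnt = 0; cnt < MAX_PRUSS_EVT; cnt++, p++) {
        uio_unregister_device(p);
        kfree(p->name);
    }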
386 ldv_s_pruss_driver_platform_driver = 0;
392 goto ldv_33501;
395 ldv_33501:;
396 ldv_33505:;
334 tmp___0 = nondet_int() { /* Function call is skipped because the function is undefined */ }
334 assume(!(tmp___0 != 0));
334 assume(!(ldv_s_pruss_driver_platform_driver != 0));
342 ldv_module_exit:;
402 -ldv_check_final_state()
{
58 assume(!(ldv_counter_clk != 0));
60 assume(ldv_counter_pruss_clk_of_uio_pruss_dev != 0);
60 -ldv_error()
{
15 LDV_ERROR:;
}
}
}
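The trace ends in LDV_ERROR because the second assertion in ldv_check_final_state() fails: the model counter for pruss_clk is still 1 when the module exits. In pruss_probe() the call clk_enable(gdev->pruss_clk) sets the counter, but pruss_cleanup() only calls clk_put() and never clk_disable(), so nothing on the remove path ever resets it. A minimal sketch of the model-state evolution along this trace (the model functions are defined in the clock-model listing near the end of this report):

    ldv_clk_enable_pruss_clk_of_uio_pruss_dev();  /* pruss_probe(): clk_enable() sets counter to 1 */
    /* pruss_remove() -> pruss_cleanup(): clk_put() only; counter stays 1 */
    ldv_check_final_state();                      /* asserts counter == 0, so LDV_ERROR is reached */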
Source code
1 #ifndef _ASM_X86_DMA_MAPPING_H 2 #define _ASM_X86_DMA_MAPPING_H 3 4 /* 5 * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and 6 * Documentation/DMA-API.txt for documentation. 7 */ 8 9 #include <linux/kmemcheck.h> 10 #include <linux/scatterlist.h> 11 #include <linux/dma-debug.h> 12 #include <asm/io.h> 13 #include <asm/swiotlb.h> 14 #include <linux/dma-contiguous.h> 15 16 #ifdef CONFIG_ISA 17 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24) 18 #else 19 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32) 20 #endif 21 22 #define DMA_ERROR_CODE 0 23 24 extern int iommu_merge; 25 extern struct device x86_dma_fallback_dev; 26 extern int panic_on_overflow; 27 28 extern struct dma_map_ops *dma_ops; 29 30 static inline struct dma_map_ops *get_dma_ops(struct device *dev) 31 { 32 #ifndef CONFIG_X86_DEV_DMA_OPS 33 return dma_ops; 34 #else 35 if (unlikely(!dev) || !dev->archdata.dma_ops) 36 return dma_ops; 37 else 38 return dev->archdata.dma_ops; 39 #endif 40 } 41 42 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); 43 #define arch_dma_alloc_attrs arch_dma_alloc_attrs 44 45 #define HAVE_ARCH_DMA_SUPPORTED 1 46 extern int dma_supported(struct device *hwdev, u64 mask); 47 48 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, 49 dma_addr_t *dma_addr, gfp_t flag, 50 unsigned long attrs); 51 52 extern void dma_generic_free_coherent(struct device *dev, size_t size, 53 void *vaddr, dma_addr_t dma_addr, 54 unsigned long attrs); 55 56 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ 57 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); 58 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); 59 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); 60 #else 61 62 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 63 { 64 if (!dev->dma_mask) 65 return 0; 66 67 return addr + size - 1 <= *dev->dma_mask; 68 } 69 70 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 71 { 72 return paddr; 73 } 74 75 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 76 { 77 return daddr; 78 } 79 #endif /* CONFIG_X86_DMA_REMAP */ 80 81 static inline void 82 dma_cache_sync(struct device *dev, void *vaddr, size_t size, 83 enum dma_data_direction dir) 84 { 85 flush_write_buffers(); 86 } 87 88 static inline unsigned long dma_alloc_coherent_mask(struct device *dev, 89 gfp_t gfp) 90 { 91 unsigned long dma_mask = 0; 92 93 dma_mask = dev->coherent_dma_mask; 94 if (!dma_mask) 95 dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32); 96 97 return dma_mask; 98 } 99 100 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) 101 { 102 unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp); 103 104 if (dma_mask <= DMA_BIT_MASK(24)) 105 gfp |= GFP_DMA; 106 #ifdef CONFIG_X86_64 107 if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) 108 gfp |= GFP_DMA32; 109 #endif 110 return gfp; 111 } 112 113 #endif
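The driver listing below pairs dma_alloc_coherent() with dma_free_coherent() from this API. A minimal sketch of that lifecycle, using a hypothetical helper example_alloc() with the size hard-coded to the driver's default external pool size:

    static int example_alloc(struct device *dev)    /* hypothetical helper */
    {
        dma_addr_t paddr;
        void *vaddr;

        vaddr = dma_alloc_coherent(dev, SZ_256K, &paddr, GFP_KERNEL | GFP_DMA);
        if (!vaddr)
            return -ENOMEM;
        /* ... expose the buffer, e.g. through a UIO mapping ... */
        dma_free_coherent(dev, SZ_256K, vaddr, paddr);
        return 0;
    }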
1 #ifndef _ASM_X86_IO_H 2 #define _ASM_X86_IO_H 3 4 /* 5 * This file contains the definitions for the x86 IO instructions 6 * inb/inw/inl/outb/outw/outl and the "string versions" of the same 7 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" 8 * versions of the single-IO instructions (inb_p/inw_p/..). 9 * 10 * This file is not meant to be obfuscating: it's just complicated 11 * to (a) handle it all in a way that makes gcc able to optimize it 12 * as well as possible and (b) trying to avoid writing the same thing 13 * over and over again with slight variations and possibly making a 14 * mistake somewhere. 15 */ 16 17 /* 18 * Thanks to James van Artsdalen for a better timing-fix than 19 * the two short jumps: using outb's to a nonexistent port seems 20 * to guarantee better timings even on fast machines. 21 * 22 * On the other hand, I'd like to be sure of a non-existent port: 23 * I feel a bit unsafe about using 0x80 (should be safe, though) 24 * 25 * Linus 26 */ 27 28 /* 29 * Bit simplified and optimized by Jan Hubicka 30 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. 31 * 32 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, 33 * isa_read[wl] and isa_write[wl] fixed 34 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> 35 */ 36 37 #define ARCH_HAS_IOREMAP_WC 38 #define ARCH_HAS_IOREMAP_WT 39 40 #include <linux/string.h> 41 #include <linux/compiler.h> 42 #include <asm/page.h> 43 #include <asm/early_ioremap.h> 44 #include <asm/pgtable_types.h> 45 46 #define build_mmio_read(name, size, type, reg, barrier) \ 47 static inline type name(const volatile void __iomem *addr) \ 48 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \ 49 :"m" (*(volatile type __force *)addr) barrier); return ret; } 50 51 #define build_mmio_write(name, size, type, reg, barrier) \ 52 static inline void name(type val, volatile void __iomem *addr) \ 53 { asm volatile("mov" size " %0,%1": :reg (val), \ 54 "m" (*(volatile type __force *)addr) barrier); } 55 56 build_mmio_read(readb, "b", unsigned char, "=q", :"memory") 57 build_mmio_read(readw, "w", unsigned short, "=r", :"memory") 58 build_mmio_read(readl, "l", unsigned int, "=r", :"memory") 59 60 build_mmio_read(__readb, "b", unsigned char, "=q", ) 61 build_mmio_read(__readw, "w", unsigned short, "=r", ) 62 build_mmio_read(__readl, "l", unsigned int, "=r", ) 63 64 build_mmio_write(writeb, "b", unsigned char, "q", :"memory") 65 build_mmio_write(writew, "w", unsigned short, "r", :"memory") 66 build_mmio_write(writel, "l", unsigned int, "r", :"memory") 67 68 build_mmio_write(__writeb, "b", unsigned char, "q", ) 69 build_mmio_write(__writew, "w", unsigned short, "r", ) 70 build_mmio_write(__writel, "l", unsigned int, "r", ) 71 72 #define readb_relaxed(a) __readb(a) 73 #define readw_relaxed(a) __readw(a) 74 #define readl_relaxed(a) __readl(a) 75 #define __raw_readb __readb 76 #define __raw_readw __readw 77 #define __raw_readl __readl 78 79 #define writeb_relaxed(v, a) __writeb(v, a) 80 #define writew_relaxed(v, a) __writew(v, a) 81 #define writel_relaxed(v, a) __writel(v, a) 82 #define __raw_writeb __writeb 83 #define __raw_writew __writew 84 #define __raw_writel __writel 85 86 #define mmiowb() barrier() 87 88 #ifdef CONFIG_X86_64 89 90 build_mmio_read(readq, "q", unsigned long, "=r", :"memory") 91 build_mmio_write(writeq, "q", unsigned long, "r", :"memory") 92 93 #define readq_relaxed(a) readq(a) 94 #define writeq_relaxed(v, a) writeq(v, a) 95 96 #define __raw_readq(a) readq(a) 97 #define __raw_writeq(val, addr) writeq(val, 
addr) 98 99 /* Let people know that we have them */ 100 #define readq readq 101 #define writeq writeq 102 103 #endif 104 105 /** 106 * virt_to_phys - map virtual addresses to physical 107 * @address: address to remap 108 * 109 * The returned physical address is the physical (CPU) mapping for 110 * the memory address given. It is only valid to use this function on 111 * addresses directly mapped or allocated via kmalloc. 112 * 113 * This function does not give bus mappings for DMA transfers. In 114 * almost all conceivable cases a device driver should not be using 115 * this function 116 */ 117 118 static inline phys_addr_t virt_to_phys(volatile void *address) 119 { 120 return __pa(address); 121 } 122 123 /** 124 * phys_to_virt - map physical address to virtual 125 * @address: address to remap 126 * 127 * The returned virtual address is a current CPU mapping for 128 * the memory address given. It is only valid to use this function on 129 * addresses that have a kernel mapping 130 * 131 * This function does not handle bus mappings for DMA transfers. In 132 * almost all conceivable cases a device driver should not be using 133 * this function 134 */ 135 136 static inline void *phys_to_virt(phys_addr_t address) 137 { 138 return __va(address); 139 } 140 141 /* 142 * Change "struct page" to physical address. 143 */ 144 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) 145 146 /* 147 * ISA I/O bus memory addresses are 1:1 with the physical address. 148 * However, we truncate the address to unsigned int to avoid undesirable 149 * promitions in legacy drivers. 150 */ 151 static inline unsigned int isa_virt_to_bus(volatile void *address) 152 { 153 return (unsigned int)virt_to_phys(address); 154 } 155 #define isa_page_to_bus(page) ((unsigned int)page_to_phys(page)) 156 #define isa_bus_to_virt phys_to_virt 157 158 /* 159 * However PCI ones are not necessarily 1:1 and therefore these interfaces 160 * are forbidden in portable PCI drivers. 161 * 162 * Allow them on x86 for legacy drivers, though. 163 */ 164 #define virt_to_bus virt_to_phys 165 #define bus_to_virt phys_to_virt 166 167 /** 168 * ioremap - map bus memory into CPU space 169 * @offset: bus address of the memory 170 * @size: size of the resource to map 171 * 172 * ioremap performs a platform specific sequence of operations to 173 * make bus memory CPU accessible via the readb/readw/readl/writeb/ 174 * writew/writel functions and the other mmio helpers. The returned 175 * address is not guaranteed to be usable directly as a virtual 176 * address. 177 * 178 * If the area you are trying to map is a PCI BAR you should have a 179 * look at pci_iomap(). 
180 */ 181 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); 182 extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size); 183 #define ioremap_uc ioremap_uc 184 185 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); 186 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, 187 unsigned long prot_val); 188 189 /* 190 * The default ioremap() behavior is non-cached: 191 */ 192 static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) 193 { 194 return ioremap_nocache(offset, size); 195 } 196 197 extern void iounmap(volatile void __iomem *addr); 198 199 extern void set_iounmap_nonlazy(void); 200 201 #ifdef __KERNEL__ 202 203 #include <asm-generic/iomap.h> 204 205 /* 206 * Convert a virtual cached pointer to an uncached pointer 207 */ 208 #define xlate_dev_kmem_ptr(p) p 209 210 static inline void 211 memset_io(volatile void __iomem *addr, unsigned char val, size_t count) 212 { 213 memset((void __force *)addr, val, count); 214 } 215 216 static inline void 217 memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count) 218 { 219 memcpy(dst, (const void __force *)src, count); 220 } 221 222 static inline void 223 memcpy_toio(volatile void __iomem *dst, const void *src, size_t count) 224 { 225 memcpy((void __force *)dst, src, count); 226 } 227 228 /* 229 * ISA space is 'always mapped' on a typical x86 system, no need to 230 * explicitly ioremap() it. The fact that the ISA IO space is mapped 231 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values 232 * are physical addresses. The following constant pointer can be 233 * used as the IO-area pointer (it can be iounmapped as well, so the 234 * analogy with PCI is quite large): 235 */ 236 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) 237 238 /* 239 * Cache management 240 * 241 * This needed for two cases 242 * 1. Out of order aware processors 243 * 2. 
Accidentally out of order processors (PPro errata #51) 244 */ 245 246 static inline void flush_write_buffers(void) 247 { 248 #if defined(CONFIG_X86_PPRO_FENCE) 249 asm volatile("lock; addl $0,0(%%esp)": : :"memory"); 250 #endif 251 } 252 253 #endif /* __KERNEL__ */ 254 255 extern void native_io_delay(void); 256 257 extern int io_delay_type; 258 extern void io_delay_init(void); 259 260 #if defined(CONFIG_PARAVIRT) 261 #include <asm/paravirt.h> 262 #else 263 264 static inline void slow_down_io(void) 265 { 266 native_io_delay(); 267 #ifdef REALLY_SLOW_IO 268 native_io_delay(); 269 native_io_delay(); 270 native_io_delay(); 271 #endif 272 } 273 274 #endif 275 276 #define BUILDIO(bwl, bw, type) \ 277 static inline void out##bwl(unsigned type value, int port) \ 278 { \ 279 asm volatile("out" #bwl " %" #bw "0, %w1" \ 280 : : "a"(value), "Nd"(port)); \ 281 } \ 282 \ 283 static inline unsigned type in##bwl(int port) \ 284 { \ 285 unsigned type value; \ 286 asm volatile("in" #bwl " %w1, %" #bw "0" \ 287 : "=a"(value) : "Nd"(port)); \ 288 return value; \ 289 } \ 290 \ 291 static inline void out##bwl##_p(unsigned type value, int port) \ 292 { \ 293 out##bwl(value, port); \ 294 slow_down_io(); \ 295 } \ 296 \ 297 static inline unsigned type in##bwl##_p(int port) \ 298 { \ 299 unsigned type value = in##bwl(port); \ 300 slow_down_io(); \ 301 return value; \ 302 } \ 303 \ 304 static inline void outs##bwl(int port, const void *addr, unsigned long count) \ 305 { \ 306 asm volatile("rep; outs" #bwl \ 307 : "+S"(addr), "+c"(count) : "d"(port)); \ 308 } \ 309 \ 310 static inline void ins##bwl(int port, void *addr, unsigned long count) \ 311 { \ 312 asm volatile("rep; ins" #bwl \ 313 : "+D"(addr), "+c"(count) : "d"(port)); \ 314 } 315 316 BUILDIO(b, b, char) 317 BUILDIO(w, w, short) 318 BUILDIO(l, , int) 319 320 extern void *xlate_dev_mem_ptr(phys_addr_t phys); 321 extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); 322 323 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, 324 enum page_cache_mode pcm); 325 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size); 326 extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size); 327 328 extern bool is_early_ioremap_ptep(pte_t *ptep); 329 330 #ifdef CONFIG_XEN 331 #include <xen/xen.h> 332 struct bio_vec; 333 334 extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, 335 const struct bio_vec *vec2); 336 337 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ 338 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \ 339 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2))) 340 #endif /* CONFIG_XEN */ 341 342 #define IO_SPACE_LIMIT 0xffff 343 344 #ifdef CONFIG_MTRR 345 extern int __must_check arch_phys_wc_index(int handle); 346 #define arch_phys_wc_index arch_phys_wc_index 347 348 extern int __must_check arch_phys_wc_add(unsigned long base, 349 unsigned long size); 350 extern void arch_phys_wc_del(int handle); 351 #define arch_phys_wc_add arch_phys_wc_add 352 #endif 353 354 #endif /* _ASM_X86_IO_H */
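The driver relies on this header for ioremap()/iounmap() and, via asm-generic/iomap.h, for ioread32()/iowrite32(). A minimal sketch of the map/access/unmap pattern used by pruss_probe() and pruss_handler(), with the resource and the register offsets (PINTC_HIER, PINTC_HIDISR) assumed to come from the driver context:

    static int example_map(struct resource *regs)   /* hypothetical helper */
    {
        void __iomem *base;
        u32 val;

        base = ioremap(regs->start, resource_size(regs));
        if (!base)
            return -ENODEV;
        val = ioread32(base + 0x1500);              /* PINTC_HIER: host interrupt enable bits */
        if (val & (1 << 2))
            iowrite32(2, base + 0x0038);            /* PINTC_HIDISR: disable host interrupt 2 */
        iounmap(base);
        return 0;
    }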
1
2 /*
3  * Programmable Real-Time Unit Sub System (PRUSS) UIO driver (uio_pruss)
4  *
5  * This driver exports PRUSS host event out interrupts and PRUSS, L3 RAM,
6  * and DDR RAM to user space for applications interacting with PRUSS firmware
7  *
8  * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License as
12  * published by the Free Software Foundation version 2.
13  *
14  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
15  * kind, whether express or implied; without even the implied warranty
16  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  * GNU General Public License for more details.
18  */
19 #include <linux/device.h>
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/platform_device.h>
23 #include <linux/uio_driver.h>
24 #include <linux/platform_data/uio_pruss.h>
25 #include <linux/io.h>
26 #include <linux/clk.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/sizes.h>
29 #include <linux/slab.h>
30 #include <linux/genalloc.h>
31
32 #define DRV_NAME "pruss_uio"
33 #define DRV_VERSION "1.0"
34
35 static int sram_pool_sz = SZ_16K;
36 module_param(sram_pool_sz, int, 0);
37 MODULE_PARM_DESC(sram_pool_sz, "sram pool size to allocate ");
38
39 static int extram_pool_sz = SZ_256K;
40 module_param(extram_pool_sz, int, 0);
41 MODULE_PARM_DESC(extram_pool_sz, "external ram pool size to allocate");
42
43 /*
44  * Host event IRQ numbers from PRUSS - PRUSS can generate up to 8 interrupt
45  * events to AINTC of ARM host processor - which can be used for IPC b/w PRUSS
46  * firmware and user space application, async notification from PRU firmware
47  * to user space application
48  * 3  PRU_EVTOUT0
49  * 4  PRU_EVTOUT1
50  * 5  PRU_EVTOUT2
51  * 6  PRU_EVTOUT3
52  * 7  PRU_EVTOUT4
53  * 8  PRU_EVTOUT5
54  * 9  PRU_EVTOUT6
55  * 10 PRU_EVTOUT7
56  */
57 #define MAX_PRUSS_EVT 8
58
59 #define PINTC_HIDISR 0x0038
60 #define PINTC_HIPIR 0x0900
61 #define HIPIR_NOPEND 0x80000000
62 #define PINTC_HIER 0x1500
63
64 struct uio_pruss_dev {
65     struct uio_info *info;
66     struct clk *pruss_clk;
67     dma_addr_t sram_paddr;
68     dma_addr_t ddr_paddr;
69     void __iomem *prussio_vaddr;
70     unsigned long sram_vaddr;
71     void *ddr_vaddr;
72     unsigned int hostirq_start;
73     unsigned int pintc_base;
74     struct gen_pool *sram_pool;
75 };
76
77 static irqreturn_t pruss_handler(int irq, struct uio_info *info)
78 {
79     struct uio_pruss_dev *gdev = info->priv;
80     int intr_bit = (irq - gdev->hostirq_start + 2);
81     int val, intr_mask = (1 << intr_bit);
82     void __iomem *base = gdev->prussio_vaddr + gdev->pintc_base;
83     void __iomem *intren_reg = base + PINTC_HIER;
84     void __iomem *intrdis_reg = base + PINTC_HIDISR;
85     void __iomem *intrstat_reg = base + PINTC_HIPIR + (intr_bit << 2);
86
87     val = ioread32(intren_reg);
88     /* Is interrupt enabled and active ? */
89     if (!(val & intr_mask) && (ioread32(intrstat_reg) & HIPIR_NOPEND))
90         return IRQ_NONE;
91     /* Disable interrupt */
92     iowrite32(intr_bit, intrdis_reg);
93     return IRQ_HANDLED;
94 }
95
96 static void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev)
97 {
98     int cnt;
99     struct uio_info *p = gdev->info;
100
101     for (cnt = 0; cnt < MAX_PRUSS_EVT; cnt++, p++) {
102         uio_unregister_device(p);
103         kfree(p->name);
104     }
105     iounmap(gdev->prussio_vaddr);
106     if (gdev->ddr_vaddr) {
107         dma_free_coherent(dev, extram_pool_sz, gdev->ddr_vaddr,
108             gdev->ddr_paddr);
109     }
110     if (gdev->sram_vaddr)
111         gen_pool_free(gdev->sram_pool,
112                       gdev->sram_vaddr,
113                       sram_pool_sz);
114     kfree(gdev->info);
115     clk_put(gdev->pruss_clk);
116     kfree(gdev);
117 }
118
119 static int pruss_probe(struct platform_device *pdev)
120 {
121     struct uio_info *p;
122     struct uio_pruss_dev *gdev;
123     struct resource *regs_prussio;
124     struct device *dev = &pdev->dev;
125     int ret = -ENODEV, cnt = 0, len;
126     struct uio_pruss_pdata *pdata = dev_get_platdata(dev);
127
128     gdev = kzalloc(sizeof(struct uio_pruss_dev), GFP_KERNEL);
129     if (!gdev)
130         return -ENOMEM;
131
132     gdev->info = kzalloc(sizeof(*p) * MAX_PRUSS_EVT, GFP_KERNEL);
133     if (!gdev->info) {
134         kfree(gdev);
135         return -ENOMEM;
136     }
137
138     /* Power on PRU in case its not done as part of boot-loader */
139     gdev->pruss_clk = clk_get(dev, "pruss");
140     if (IS_ERR(gdev->pruss_clk)) {
141         dev_err(dev, "Failed to get clock\n");
142         ret = PTR_ERR(gdev->pruss_clk);
143         kfree(gdev->info);
144         kfree(gdev);
145         return ret;
146     } else {
147         clk_enable(gdev->pruss_clk);
148     }
149
150     regs_prussio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
151     if (!regs_prussio) {
152         dev_err(dev, "No PRUSS I/O resource specified\n");
153         goto out_free;
154     }
155
156     if (!regs_prussio->start) {
157         dev_err(dev, "Invalid memory resource\n");
158         goto out_free;
159     }
160
161     if (pdata->sram_pool) {
162         gdev->sram_pool = pdata->sram_pool;
163         gdev->sram_vaddr =
164             (unsigned long)gen_pool_dma_alloc(gdev->sram_pool,
165                     sram_pool_sz, &gdev->sram_paddr);
166         if (!gdev->sram_vaddr) {
167             dev_err(dev, "Could not allocate SRAM pool\n");
168             goto out_free;
169         }
170     }
171
172     gdev->ddr_vaddr = dma_alloc_coherent(dev, extram_pool_sz,
173             &(gdev->ddr_paddr), GFP_KERNEL | GFP_DMA);
174     if (!gdev->ddr_vaddr) {
175         dev_err(dev, "Could not allocate external memory\n");
176         goto out_free;
177     }
178
179     len = resource_size(regs_prussio);
180     gdev->prussio_vaddr = ioremap(regs_prussio->start, len);
181     if (!gdev->prussio_vaddr) {
182         dev_err(dev, "Can't remap PRUSS I/O address range\n");
183         goto out_free;
184     }
185
186     gdev->pintc_base = pdata->pintc_base;
187     gdev->hostirq_start = platform_get_irq(pdev, 0);
188
189     for (cnt = 0, p = gdev->info; cnt < MAX_PRUSS_EVT; cnt++, p++) {
190         p->mem[0].addr = regs_prussio->start;
191         p->mem[0].size = resource_size(regs_prussio);
192         p->mem[0].memtype = UIO_MEM_PHYS;
193
194         p->mem[1].addr = gdev->sram_paddr;
195         p->mem[1].size = sram_pool_sz;
196         p->mem[1].memtype = UIO_MEM_PHYS;
197
198         p->mem[2].addr = gdev->ddr_paddr;
199         p->mem[2].size = extram_pool_sz;
200         p->mem[2].memtype = UIO_MEM_PHYS;
201
202         p->name = kasprintf(GFP_KERNEL, "pruss_evt%d", cnt);
203         p->version = DRV_VERSION;
204
205         /* Register PRUSS IRQ lines */
206         p->irq = gdev->hostirq_start + cnt;
207         p->handler = pruss_handler;
208         p->priv = gdev;
209
210         ret = uio_register_device(dev, p);
211         if (ret < 0)
212             goto out_free;
213     }
214
215     platform_set_drvdata(pdev, gdev);
216     return 0;
217
218 out_free:
219     pruss_cleanup(dev, gdev);
220     return ret;
221 }
222
223 static int pruss_remove(struct platform_device *dev)
224 {
225     struct uio_pruss_dev *gdev = platform_get_drvdata(dev);
226
227     pruss_cleanup(&dev->dev, gdev);
228     return 0;
229 }
230
231 static struct platform_driver pruss_driver = {
232     .probe = pruss_probe,
233     .remove = pruss_remove,
234     .driver = {
235         .name = DRV_NAME,
236     },
237 };
238
239 module_platform_driver(pruss_driver);
240
241 MODULE_LICENSE("GPL v2");
242 MODULE_VERSION(DRV_VERSION);
243 MODULE_AUTHOR("Amit Chatterjee <amit.chatterjee@ti.com>");
244 MODULE_AUTHOR("Pratheesh Gangadhar <pratheesh@ti.com>");
245
246
247
248
249
250 /* LDV_COMMENT_BEGIN_MAIN */
251 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
252
253 /*###########################################################################*/
254
255 /*############## Driver Environment Generator 0.2 output ####################*/
256
257 /*###########################################################################*/
258
259
260
261 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
262 void ldv_check_final_state(void);
263
264 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
265 void ldv_check_return_value(int res);
266
267 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
268 void ldv_check_return_value_probe(int res);
269
270 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
271 void ldv_initialize(void);
272
273 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
274 void ldv_handler_precall(void);
275
276 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary integer value. */
277 int nondet_int(void);
278
279 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
280 int LDV_IN_INTERRUPT;
281
282 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
283 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
284
285
286
287     /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
288     /*============================= VARIABLE DECLARATION PART =============================*/
289     /** STRUCT: struct type: platform_driver, struct name: pruss_driver **/
290     /* content: static int pruss_probe(struct platform_device *pdev)*/
291     /* LDV_COMMENT_BEGIN_PREP */
292     #define DRV_NAME "pruss_uio"
293     #define DRV_VERSION "1.0"
294     #define MAX_PRUSS_EVT 8
295     #define PINTC_HIDISR 0x0038
296     #define PINTC_HIPIR 0x0900
297     #define HIPIR_NOPEND 0x80000000
298     #define PINTC_HIER 0x1500
299     /* LDV_COMMENT_END_PREP */
300     /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "pruss_probe" */
301     struct platform_device * var_group1;
302     /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "pruss_probe" */
303     static int res_pruss_probe_2;
304     /* content: static int pruss_remove(struct platform_device *dev)*/
305     /* LDV_COMMENT_BEGIN_PREP */
306     #define DRV_NAME "pruss_uio"
307     #define DRV_VERSION "1.0"
308     #define MAX_PRUSS_EVT 8
309     #define PINTC_HIDISR 0x0038
310     #define PINTC_HIPIR 0x0900
311     #define HIPIR_NOPEND 0x80000000
312     #define PINTC_HIER 0x1500
313     /* LDV_COMMENT_END_PREP */
314
315
316
317
318     /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
319     /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
320     /*============================= VARIABLE INITIALIZING PART =============================*/
321     LDV_IN_INTERRUPT=1;
322
323
324
325
326     /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
327     /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
328     /*============================= FUNCTION CALL SECTION =============================*/
329     /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
330     ldv_initialize();
331     int ldv_s_pruss_driver_platform_driver = 0;
332
333
334     while( nondet_int()
335         || !(ldv_s_pruss_driver_platform_driver == 0)
336     ) {
337
338         switch(nondet_int()) {
339
340         case 0: {
341
342             /** STRUCT: struct type: platform_driver, struct name: pruss_driver **/
343             if(ldv_s_pruss_driver_platform_driver==0) {
344
345                 /* content: static int pruss_probe(struct platform_device *pdev)*/
346                 /* LDV_COMMENT_BEGIN_PREP */
347                 #define DRV_NAME "pruss_uio"
348                 #define DRV_VERSION "1.0"
349                 #define MAX_PRUSS_EVT 8
350                 #define PINTC_HIDISR 0x0038
351                 #define PINTC_HIPIR 0x0900
352                 #define HIPIR_NOPEND 0x80000000
353                 #define PINTC_HIER 0x1500
354                 /* LDV_COMMENT_END_PREP */
355                 /* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "pruss_driver". Standard function test for correct return result. */
356                 res_pruss_probe_2 = pruss_probe( var_group1);
357                 ldv_check_return_value(res_pruss_probe_2);
358                 ldv_check_return_value_probe(res_pruss_probe_2);
359                 if(res_pruss_probe_2)
360                     goto ldv_module_exit;
361                 ldv_s_pruss_driver_platform_driver++;
362
363             }
364
365         }
366
367         break;
368         case 1: {
369
370             /** STRUCT: struct type: platform_driver, struct name: pruss_driver **/
371             if(ldv_s_pruss_driver_platform_driver==1) {
372
373                 /* content: static int pruss_remove(struct platform_device *dev)*/
374                 /* LDV_COMMENT_BEGIN_PREP */
375                 #define DRV_NAME "pruss_uio"
376                 #define DRV_VERSION "1.0"
377                 #define MAX_PRUSS_EVT 8
378                 #define PINTC_HIDISR 0x0038
379                 #define PINTC_HIPIR 0x0900
380                 #define HIPIR_NOPEND 0x80000000
381                 #define PINTC_HIER 0x1500
382                 /* LDV_COMMENT_END_PREP */
383                 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "pruss_driver" */
384                 ldv_handler_precall();
385                 pruss_remove( var_group1);
386                 ldv_s_pruss_driver_platform_driver=0;
387
388             }
389
390         }
391
392         break;
393         default: break;
394
395         }
396
397     }
398
399     ldv_module_exit:
400
401     /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
402     ldv_final: ldv_check_final_state();
403
404     /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
405     return;
406
407 }
408 #endif
409
410 /* LDV_COMMENT_END_MAIN */
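Note that pruss_cleanup() above releases every resource except the clock state itself: the clk_enable() performed in pruss_probe() is never balanced by a clk_disable(), which is exactly the leak this error trace reports. A minimal sketch of a balanced tail for pruss_cleanup(), assuming no other users of the clock (the actual upstream fix may differ):

    kfree(gdev->info);
    clk_disable(gdev->pruss_clk);   /* balances clk_enable() in pruss_probe() */
    clk_put(gdev->pruss_clk);
    kfree(gdev);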
1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8 #include <linux/module.h>
9 struct clk;
10
11 extern void ldv_clk_disable_clk(struct clk *clk);
12 extern int ldv_clk_enable_clk(void);
13 extern void ldv_clk_disable_pruss_clk_of_uio_pruss_dev(struct clk *clk);
14 extern int ldv_clk_enable_pruss_clk_of_uio_pruss_dev(void);
15 #line 1 "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.9-rc1.tar.xz--X--320_7a--X--cpachecker/linux-4.9-rc1.tar.xz/csd_deg_dscv/14083/dscv_tempdir/dscv/ri/320_7a/drivers/uio/uio_pruss.c"
[Lines 16-426 of this prepared file repeat the uio_pruss.c listing above verbatim.]
427 #line 15 "/work/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.9-rc1.tar.xz--X--320_7a--X--cpachecker/linux-4.9-rc1.tar.xz/csd_deg_dscv/14083/dscv_tempdir/dscv/ri/320_7a/drivers/uio/uio_pruss.o.c.prepared"
1
2 #include <verifier/rcv.h>
3 #include <kernel-model/ERR.inc>
4
5 struct clk;
6
7
8 /* LDV_COMMENT_CHANGE_STATE Initialize counter to zero. */
9 int ldv_counter_clk = 0;
10
11 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_clk_disable_clk') Release. */
12 void ldv_clk_disable_clk(struct clk *clk)
13 {
14     /* LDV_COMMENT_CHANGE_STATE Reset counter to zero. */
15     ldv_counter_clk = 0;
16 }
17
18 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_clk_enable_clk') Acquire. */
19 int ldv_clk_enable_clk(void)
20 {
21     int retval = ldv_undef_int();
22     if (!retval)
23     {
24         /* LDV_COMMENT_CHANGE_STATE Set counter to one on success. */
25         ldv_counter_clk = 1;
26     }
27     return retval;
28 }
29
30
31 /* LDV_COMMENT_CHANGE_STATE Initialize counter to zero. */
32 int ldv_counter_pruss_clk_of_uio_pruss_dev = 0;
33
34 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_clk_disable_pruss_clk_of_uio_pruss_dev') Release. */
35 void ldv_clk_disable_pruss_clk_of_uio_pruss_dev(struct clk *clk)
36 {
37     /* LDV_COMMENT_CHANGE_STATE Reset counter to zero. */
38     ldv_counter_pruss_clk_of_uio_pruss_dev = 0;
39 }
40
41 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_clk_enable_pruss_clk_of_uio_pruss_dev') Acquire. */
42 int ldv_clk_enable_pruss_clk_of_uio_pruss_dev(void)
43 {
44     int retval = ldv_undef_int();
45     if (!retval)
46     {
47         /* LDV_COMMENT_CHANGE_STATE Set counter to one on success. */
48         ldv_counter_pruss_clk_of_uio_pruss_dev = 1;
49     }
50     return retval;
51 }
52
53
54 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all clocks are released at the end. */
55 void ldv_check_final_state(void)
56 {
57     /* LDV_COMMENT_ASSERT Clock 'clk' must be released at the end. */
58     ldv_assert(ldv_counter_clk == 0);
59     /* LDV_COMMENT_ASSERT Clock 'pruss_clk_of_uio_pruss_dev' must be released at the end. */
60     ldv_assert(ldv_counter_pruss_clk_of_uio_pruss_dev == 0);
61 }
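Under this model, each clock is a binary counter: a successful (zero-returning) ldv_clk_enable_*() sets it to 1 and ldv_clk_disable_*() resets it to 0, so ldv_check_final_state() passes only if every successful enable has been matched by a disable. A minimal sketch of a passing lifetime, with clk assumed to be a valid struct clk pointer:

    if (!ldv_clk_enable_pruss_clk_of_uio_pruss_dev()) {   /* counter = 1 */
        /* ... device in use ... */
        ldv_clk_disable_pruss_clk_of_uio_pruss_dev(clk);  /* counter = 0 */
    }
    ldv_check_final_state();                              /* both assertions hold */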
1 #ifndef _LDV_ERR_
2 #define _LDV_ERR_
3
4 #include <linux/kernel.h>
5
6 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_is_err') Returns the result of checking whether the pointer is an error value. */
7 bool ldv_is_err(const void *ptr)
8 {
9     /* LDV_COMMENT_RETURN Return the result of the error-pointer check. */
10     return ((unsigned long)ptr > LDV_PTR_MAX);
11 }
12
13 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_err_ptr') Encodes an error code as a pointer. */
14 void *ldv_err_ptr(long error)
15 {
16     /* LDV_COMMENT_RETURN Return an error pointer. */
17     return (void *)(LDV_PTR_MAX - error);
18 }
19
20 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_ptr_err') Recovers the error code from an error pointer. */
21 long ldv_ptr_err(const void *ptr)
22 {
23     /* LDV_COMMENT_RETURN Return the error code. */
24     return (long)(LDV_PTR_MAX - (unsigned long)ptr);
25 }
26
27 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_is_err_or_null') Checks whether the pointer is an error value or NULL. */
28 bool ldv_is_err_or_null(const void *ptr)
29 {
30     /* LDV_COMMENT_RETURN Return 0 if the pointer is valid and non-NULL, and 1 otherwise. */
31     return !ptr || ldv_is_err(ptr);
32 }
33
34 #endif /* _LDV_ERR_ */
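These helpers mirror the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() pattern that pruss_probe() applies to the result of clk_get(). A minimal sketch of the round trip under this model:

    void *p = ldv_err_ptr(-ENODEV);     /* encode -ENODEV as an error pointer */
    if (ldv_is_err(p)) {                /* models IS_ERR(p): true here */
        long err = ldv_ptr_err(p);      /* err == -ENODEV; models PTR_ERR(p) */
        /* propagate err to the caller */
    }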
1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes the program to reach the error
5    label, like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because some static verifiers (like
9    BLAST) don't accept multiple error labels in a program. */
10 static inline void ldv_error(void)
11 {
12     LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16    avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21     LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return a nondeterministic negative integer. */
30 static inline int ldv_undef_int_negative(void)
31 {
32     int ret = ldv_undef_int();
33
34     ldv_assume(ret < 0);
35
36     return ret;
37 }
38 /* Return a nondeterministic nonpositive integer. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41     int ret = ldv_undef_int();
42
43     ldv_assume(ret <= 0);
44
45     return ret;
46 }
47
48 /* Add an explicit model for the __builtin_expect GCC function. Without the model a
49    return value would be treated as nondeterministic by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52     return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56    function by using a target-dependent mechanism (such as intentionally executing
57    an illegal instruction) or by calling abort. The mechanism used may vary from
58    release to release so you should not rely on any particular implementation.
59    http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62     ldv_assert(0);
63 }
64
65 /* The constant is used for simulating an error return of the ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */
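The two macros divide the verifier's work: a failing ldv_assert() drives the analysis into the LDV_ERROR label (producing an error trace such as the one above), while a failing ldv_assume() spins in LDV_STOP so that the path is silently discarded rather than reported. A minimal sketch of the difference:

    int x = ldv_undef_int();   /* nondeterministic input */
    ldv_assume(x >= 0);        /* paths with x < 0 are discarded, not reported */
    ldv_assert(x >= 0);        /* holds on every remaining path */
    ldv_assert(x > 0);         /* x == 0 is still possible, so an error trace is emitted */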
1 /* 2 * device.h - generic, centralized driver model 3 * 4 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> 5 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de> 6 * Copyright (c) 2008-2009 Novell Inc. 7 * 8 * This file is released under the GPLv2 9 * 10 * See Documentation/driver-model/ for more information. 11 */ 12 13 #ifndef _DEVICE_H_ 14 #define _DEVICE_H_ 15 16 #include <linux/ioport.h> 17 #include <linux/kobject.h> 18 #include <linux/klist.h> 19 #include <linux/list.h> 20 #include <linux/lockdep.h> 21 #include <linux/compiler.h> 22 #include <linux/types.h> 23 #include <linux/mutex.h> 24 #include <linux/pinctrl/devinfo.h> 25 #include <linux/pm.h> 26 #include <linux/atomic.h> 27 #include <linux/ratelimit.h> 28 #include <linux/uidgid.h> 29 #include <linux/gfp.h> 30 #include <asm/device.h> 31 32 struct device; 33 struct device_private; 34 struct device_driver; 35 struct driver_private; 36 struct module; 37 struct class; 38 struct subsys_private; 39 struct bus_type; 40 struct device_node; 41 struct fwnode_handle; 42 struct iommu_ops; 43 struct iommu_group; 44 struct iommu_fwspec; 45 46 struct bus_attribute { 47 struct attribute attr; 48 ssize_t (*show)(struct bus_type *bus, char *buf); 49 ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); 50 }; 51 52 #define BUS_ATTR(_name, _mode, _show, _store) \ 53 struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) 54 #define BUS_ATTR_RW(_name) \ 55 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) 56 #define BUS_ATTR_RO(_name) \ 57 struct bus_attribute bus_attr_##_name = __ATTR_RO(_name) 58 59 extern int __must_check bus_create_file(struct bus_type *, 60 struct bus_attribute *); 61 extern void bus_remove_file(struct bus_type *, struct bus_attribute *); 62 63 /** 64 * struct bus_type - The bus type of the device 65 * 66 * @name: The name of the bus. 67 * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id). 68 * @dev_root: Default device to use as the parent. 69 * @dev_attrs: Default attributes of the devices on the bus. 70 * @bus_groups: Default attributes of the bus. 71 * @dev_groups: Default attributes of the devices on the bus. 72 * @drv_groups: Default attributes of the device drivers on the bus. 73 * @match: Called, perhaps multiple times, whenever a new device or driver 74 * is added for this bus. It should return a positive value if the 75 * given device can be handled by the given driver and zero 76 * otherwise. It may also return error code if determining that 77 * the driver supports the device is not possible. In case of 78 * -EPROBE_DEFER it will queue the device for deferred probing. 79 * @uevent: Called when a device is added, removed, or a few other things 80 * that generate uevents to add the environment variables. 81 * @probe: Called when a new device or driver add to this bus, and callback 82 * the specific driver's probe to initial the matched device. 83 * @remove: Called when a device removed from this bus. 84 * @shutdown: Called at shut-down time to quiesce the device. 85 * 86 * @online: Called to put the device back online (after offlining it). 87 * @offline: Called to put the device offline for hot-removal. May fail. 88 * 89 * @suspend: Called when a device on this bus wants to go to sleep mode. 90 * @resume: Called to bring a device on this bus out of sleep mode. 91 * @pm: Power management operations of this bus, callback the specific 92 * device driver's pm-ops. 
93 * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU 94 * driver implementations to a bus and allow the driver to do 95 * bus-specific setup 96 * @p: The private data of the driver core, only the driver core can 97 * touch this. 98 * @lock_key: Lock class key for use by the lock validator 99 * 100 * A bus is a channel between the processor and one or more devices. For the 101 * purposes of the device model, all devices are connected via a bus, even if 102 * it is an internal, virtual, "platform" bus. Buses can plug into each other. 103 * A USB controller is usually a PCI device, for example. The device model 104 * represents the actual connections between buses and the devices they control. 105 * A bus is represented by the bus_type structure. It contains the name, the 106 * default attributes, the bus' methods, PM operations, and the driver core's 107 * private data. 108 */ 109 struct bus_type { 110 const char *name; 111 const char *dev_name; 112 struct device *dev_root; 113 struct device_attribute *dev_attrs; /* use dev_groups instead */ 114 const struct attribute_group **bus_groups; 115 const struct attribute_group **dev_groups; 116 const struct attribute_group **drv_groups; 117 118 int (*match)(struct device *dev, struct device_driver *drv); 119 int (*uevent)(struct device *dev, struct kobj_uevent_env *env); 120 int (*probe)(struct device *dev); 121 int (*remove)(struct device *dev); 122 void (*shutdown)(struct device *dev); 123 124 int (*online)(struct device *dev); 125 int (*offline)(struct device *dev); 126 127 int (*suspend)(struct device *dev, pm_message_t state); 128 int (*resume)(struct device *dev); 129 130 const struct dev_pm_ops *pm; 131 132 const struct iommu_ops *iommu_ops; 133 134 struct subsys_private *p; 135 struct lock_class_key lock_key; 136 }; 137 138 extern int __must_check bus_register(struct bus_type *bus); 139 140 extern void bus_unregister(struct bus_type *bus); 141 142 extern int __must_check bus_rescan_devices(struct bus_type *bus); 143 144 /* iterator helpers for buses */ 145 struct subsys_dev_iter { 146 struct klist_iter ki; 147 const struct device_type *type; 148 }; 149 void subsys_dev_iter_init(struct subsys_dev_iter *iter, 150 struct bus_type *subsys, 151 struct device *start, 152 const struct device_type *type); 153 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); 154 void subsys_dev_iter_exit(struct subsys_dev_iter *iter); 155 156 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, 157 int (*fn)(struct device *dev, void *data)); 158 struct device *bus_find_device(struct bus_type *bus, struct device *start, 159 void *data, 160 int (*match)(struct device *dev, void *data)); 161 struct device *bus_find_device_by_name(struct bus_type *bus, 162 struct device *start, 163 const char *name); 164 struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, 165 struct device *hint); 166 int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, 167 void *data, int (*fn)(struct device_driver *, void *)); 168 void bus_sort_breadthfirst(struct bus_type *bus, 169 int (*compare)(const struct device *a, 170 const struct device *b)); 171 /* 172 * Bus notifiers: Get notified of addition/removal of devices 173 * and binding/unbinding of drivers to devices. 174 * In the long run, it should be a replacement for the platform 175 * notify hooks. 
176 */ 177 struct notifier_block; 178 179 extern int bus_register_notifier(struct bus_type *bus, 180 struct notifier_block *nb); 181 extern int bus_unregister_notifier(struct bus_type *bus, 182 struct notifier_block *nb); 183 184 /* All 4 notifers below get called with the target struct device * 185 * as an argument. Note that those functions are likely to be called 186 * with the device lock held in the core, so be careful. 187 */ 188 #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ 189 #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */ 190 #define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */ 191 #define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be 192 bound */ 193 #define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */ 194 #define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be 195 unbound */ 196 #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound 197 from the device */ 198 #define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */ 199 200 extern struct kset *bus_get_kset(struct bus_type *bus); 201 extern struct klist *bus_get_device_klist(struct bus_type *bus); 202 203 /** 204 * enum probe_type - device driver probe type to try 205 * Device drivers may opt in for special handling of their 206 * respective probe routines. This tells the core what to 207 * expect and prefer. 208 * 209 * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well 210 * whether probed synchronously or asynchronously. 211 * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices which 212 * probing order is not essential for booting the system may 213 * opt into executing their probes asynchronously. 214 * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need 215 * their probe routines to run synchronously with driver and 216 * device registration (with the exception of -EPROBE_DEFER 217 * handling - re-probing always ends up being done asynchronously). 218 * 219 * Note that the end goal is to switch the kernel to use asynchronous 220 * probing by default, so annotating drivers with 221 * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us 222 * to speed up boot process while we are validating the rest of the 223 * drivers. 224 */ 225 enum probe_type { 226 PROBE_DEFAULT_STRATEGY, 227 PROBE_PREFER_ASYNCHRONOUS, 228 PROBE_FORCE_SYNCHRONOUS, 229 }; 230 231 /** 232 * struct device_driver - The basic device driver structure 233 * @name: Name of the device driver. 234 * @bus: The bus which the device of this driver belongs to. 235 * @owner: The module owner. 236 * @mod_name: Used for built-in modules. 237 * @suppress_bind_attrs: Disables bind/unbind via sysfs. 238 * @probe_type: Type of the probe (synchronous or asynchronous) to use. 239 * @of_match_table: The open firmware table. 240 * @acpi_match_table: The ACPI match table. 241 * @probe: Called to query the existence of a specific device, 242 * whether this driver can work with it, and bind the driver 243 * to a specific device. 244 * @remove: Called when the device is removed from the system to 245 * unbind a device from this driver. 246 * @shutdown: Called at shut-down time to quiesce the device. 247 * @suspend: Called to put the device to sleep mode. Usually to a 248 * low power state. 249 * @resume: Called to bring a device from sleep mode. 250 * @groups: Default attributes that get created by the driver core 251 * automatically. 252 * @pm: Power management operations of the device which matched 253 * this driver. 
231 /**
232 * struct device_driver - The basic device driver structure
233 * @name: Name of the device driver.
234 * @bus: The bus which the device of this driver belongs to.
235 * @owner: The module owner.
236 * @mod_name: Used for built-in modules.
237 * @suppress_bind_attrs: Disables bind/unbind via sysfs.
238 * @probe_type: Type of the probe (synchronous or asynchronous) to use.
239 * @of_match_table: The open firmware table.
240 * @acpi_match_table: The ACPI match table.
241 * @probe: Called to query the existence of a specific device,
242 * whether this driver can work with it, and bind the driver
243 * to a specific device.
244 * @remove: Called when the device is removed from the system to
245 * unbind a device from this driver.
246 * @shutdown: Called at shut-down time to quiesce the device.
247 * @suspend: Called to put the device to sleep mode. Usually to a
248 * low power state.
249 * @resume: Called to bring a device from sleep mode.
250 * @groups: Default attributes that get created by the driver core
251 * automatically.
252 * @pm: Power management operations of the device which matched
253 * this driver.
254 * @p: Driver core's private data, no one other than the driver
255 * core can touch this.
256 *
257 * The device driver-model tracks all of the drivers known to the system.
258 * The main reason for this tracking is to enable the driver core to match
259 * up drivers with new devices. Once drivers are known objects within the
260 * system, however, a number of other things become possible. Device drivers
261 * can export information and configuration variables that are independent
262 * of any specific device.
263 */
264 struct device_driver {
265 const char *name;
266 struct bus_type *bus;

268 struct module *owner;
269 const char *mod_name; /* used for built-in modules */

271 bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
272 enum probe_type probe_type;

274 const struct of_device_id *of_match_table;
275 const struct acpi_device_id *acpi_match_table;

277 int (*probe) (struct device *dev);
278 int (*remove) (struct device *dev);
279 void (*shutdown) (struct device *dev);
280 int (*suspend) (struct device *dev, pm_message_t state);
281 int (*resume) (struct device *dev);
282 const struct attribute_group **groups;

284 const struct dev_pm_ops *pm;

286 struct driver_private *p;
287 };

290 extern int __must_check driver_register(struct device_driver *drv);
291 extern void driver_unregister(struct device_driver *drv);

293 extern struct device_driver *driver_find(const char *name,
294 struct bus_type *bus);
295 extern int driver_probe_done(void);
296 extern void wait_for_device_probe(void);

299 /* sysfs interface for exporting driver attributes */

301 struct driver_attribute {
302 struct attribute attr;
303 ssize_t (*show)(struct device_driver *driver, char *buf);
304 ssize_t (*store)(struct device_driver *driver, const char *buf,
305 size_t count);
306 };

308 #define DRIVER_ATTR(_name, _mode, _show, _store) \
309 struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
310 #define DRIVER_ATTR_RW(_name) \
311 struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
312 #define DRIVER_ATTR_RO(_name) \
313 struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
314 #define DRIVER_ATTR_WO(_name) \
315 struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)

317 extern int __must_check driver_create_file(struct device_driver *driver,
318 const struct driver_attribute *attr);
319 extern void driver_remove_file(struct device_driver *driver,
320 const struct driver_attribute *attr);

322 extern int __must_check driver_for_each_device(struct device_driver *drv,
323 struct device *start,
324 void *data,
325 int (*fn)(struct device *dev,
326 void *));
327 struct device *driver_find_device(struct device_driver *drv,
328 struct device *start, void *data,
329 int (*match)(struct device *dev, void *data));
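/*
 * Editorial sketch: the smallest useful device_driver, registered against
 * the hypothetical foo_bus_type from the earlier sketch. driver_register()
 * is __must_check; driver_unregister() undoes it on module exit.
 */
static int foo_drv_probe(struct device *dev)
{
    dev_info(dev, "bound\n");
    return 0;
}

static struct device_driver foo_driver = {
    .name       = "foo",
    .bus        = &foo_bus_type,
    .owner      = THIS_MODULE,
    .probe      = foo_drv_probe,
    .probe_type = PROBE_PREFER_ASYNCHRONOUS, /* probe order not critical */
};

static int __init foo_drv_init(void)
{
    return driver_register(&foo_driver);
}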
331 /**
332 * struct subsys_interface - interfaces to device functions
333 * @name: name of the device function
334 * @subsys: subsystem of the devices to attach to
335 * @node: the list of functions registered at the subsystem
336 * @add_dev: device hookup to device function handler
337 * @remove_dev: device hookup to device function handler
338 *
339 * Simple interfaces attached to a subsystem. Multiple interfaces can
340 * attach to a subsystem and its devices. Unlike drivers, they do not
341 * exclusively claim or control devices. Interfaces usually represent
342 * a specific functionality of a subsystem/class of devices.
343 */
344 struct subsys_interface {
345 const char *name;
346 struct bus_type *subsys;
347 struct list_head node;
348 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
349 void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
350 };

352 int subsys_interface_register(struct subsys_interface *sif);
353 void subsys_interface_unregister(struct subsys_interface *sif);

355 int subsys_system_register(struct bus_type *subsys,
356 const struct attribute_group **groups);
357 int subsys_virtual_register(struct bus_type *subsys,
358 const struct attribute_group **groups);

360 /**
361 * struct class - device classes
362 * @name: Name of the class.
363 * @owner: The module owner.
364 * @class_attrs: Default attributes of this class.
365 * @dev_groups: Default attributes of the devices that belong to the class.
366 * @dev_kobj: The kobject that represents this class and links it into the hierarchy.
367 * @dev_uevent: Called when a device is added, removed from this class, or a
368 * few other things that generate uevents to add the environment
369 * variables.
370 * @devnode: Callback to provide the devtmpfs.
371 * @class_release: Called to release this class.
372 * @dev_release: Called to release the device.
373 * @suspend: Used to put the device to sleep mode, usually to a low power
374 * state.
375 * @resume: Used to bring the device from the sleep mode.
376 * @ns_type: Callbacks so sysfs can determine namespaces.
377 * @namespace: Namespace of the device that belongs to this class.
378 * @pm: The default device power management operations of this class.
379 * @p: The private data of the driver core, no one other than the
380 * driver core can touch this.
381 *
382 * A class is a higher-level view of a device that abstracts out low-level
383 * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
384 * at the class level, they are all simply disks. Classes allow user space
385 * to work with devices based on what they do, rather than how they are
386 * connected or how they work.
387 */
388 struct class {
389 const char *name;
390 struct module *owner;

392 struct class_attribute *class_attrs;
393 const struct attribute_group **dev_groups;
394 struct kobject *dev_kobj;

396 int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
397 char *(*devnode)(struct device *dev, umode_t *mode);

399 void (*class_release)(struct class *class);
400 void (*dev_release)(struct device *dev);

402 int (*suspend)(struct device *dev, pm_message_t state);
403 int (*resume)(struct device *dev);

405 const struct kobj_ns_type_operations *ns_type;
406 const void *(*namespace)(struct device *dev);

408 const struct dev_pm_ops *pm;

410 struct subsys_private *p;
411 };

413 struct class_dev_iter {
414 struct klist_iter ki;
415 const struct device_type *type;
416 };

418 extern struct kobject *sysfs_dev_block_kobj;
419 extern struct kobject *sysfs_dev_char_kobj;
420 extern int __must_check __class_register(struct class *class,
421 struct lock_class_key *key);
422 extern void class_unregister(struct class *class);

424 /* This is a #define to keep the compiler from merging different
425 * instances of the __key variable */
426 #define class_register(class) \
427 ({ \
428 static struct lock_class_key __key; \
429 __class_register(class, &__key); \
430 })

432 struct class_compat;
433 struct class_compat *class_compat_register(const char *name);
434 void class_compat_unregister(struct class_compat *cls);
435 int class_compat_create_link(struct class_compat *cls, struct device *dev,
436 struct device *device_link);
437 void class_compat_remove_link(struct class_compat *cls, struct device *dev,
438 struct device *device_link);

440 extern void class_dev_iter_init(struct class_dev_iter *iter,
441 struct class *class,
442 struct device *start,
443 const struct device_type *type);
444 extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
445 extern void class_dev_iter_exit(struct class_dev_iter *iter);

447 extern int class_for_each_device(struct class *class, struct device *start,
448 void *data,
449 int (*fn)(struct device *dev, void *data));
450 extern struct device *class_find_device(struct class *class,
451 struct device *start, const void *data,
452 int (*match)(struct device *, const void *));

454 struct class_attribute {
455 struct attribute attr;
456 ssize_t (*show)(struct class *class, struct class_attribute *attr,
457 char *buf);
458 ssize_t (*store)(struct class *class, struct class_attribute *attr,
459 const char *buf, size_t count);
460 };

462 #define CLASS_ATTR(_name, _mode, _show, _store) \
463 struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
464 #define CLASS_ATTR_RW(_name) \
465 struct class_attribute class_attr_##_name = __ATTR_RW(_name)
466 #define CLASS_ATTR_RO(_name) \
467 struct class_attribute class_attr_##_name = __ATTR_RO(_name)

469 extern int __must_check class_create_file_ns(struct class *class,
470 const struct class_attribute *attr,
471 const void *ns);
472 extern void class_remove_file_ns(struct class *class,
473 const struct class_attribute *attr,
474 const void *ns);

476 static inline int __must_check class_create_file(struct class *class,
477 const struct class_attribute *attr)
478 {
479 return class_create_file_ns(class, attr, NULL);
480 }

482 static inline void class_remove_file(struct class *class,
483 const struct class_attribute *attr)
484 {
485 return class_remove_file_ns(class, attr, NULL);
486 }
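/*
 * Editorial sketch: looking up a device in a class by name with
 * class_find_device(), following the match-callback signature declared
 * above. foo_class is a hypothetical struct class registered elsewhere.
 * class_find_device() takes a reference on the returned device, so the
 * caller must drop it with put_device() (declared later in this header).
 */
static int foo_match_name(struct device *dev, const void *data)
{
    return sysfs_streq(dev_name(dev), data);
}

static struct device *foo_lookup(const char *name)
{
    return class_find_device(&foo_class, NULL, name, foo_match_name);
    /* caller: if (dev) { ... use it ...; put_device(dev); } */
}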
488 /* Simple class attribute that is just a static string */
489 struct class_attribute_string {
490 struct class_attribute attr;
491 char *str;
492 };

494 /* Currently only read-only */
495 #define _CLASS_ATTR_STRING(_name, _mode, _str) \
496 { __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
497 #define CLASS_ATTR_STRING(_name, _mode, _str) \
498 struct class_attribute_string class_attr_##_name = \
499 _CLASS_ATTR_STRING(_name, _mode, _str)

501 extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
502 char *buf);

504 struct class_interface {
505 struct list_head node;
506 struct class *class;

508 int (*add_dev) (struct device *, struct class_interface *);
509 void (*remove_dev) (struct device *, struct class_interface *);
510 };

512 extern int __must_check class_interface_register(struct class_interface *);
513 extern void class_interface_unregister(struct class_interface *);

515 extern struct class * __must_check __class_create(struct module *owner,
516 const char *name,
517 struct lock_class_key *key);
518 extern void class_destroy(struct class *cls);

520 /* This is a #define to keep the compiler from merging different
521 * instances of the __key variable */
522 #define class_create(owner, name) \
523 ({ \
524 static struct lock_class_key __key; \
525 __class_create(owner, name, &__key); \
526 })

528 /*
529 * The type of device, "struct device" is embedded in. A class
530 * or bus can contain devices of different types
531 * like "partitions" and "disks", "mouse" and "event".
532 * This identifies the device type and carries type-specific
533 * information, equivalent to the kobj_type of a kobject.
534 * If "name" is specified, the uevent will contain it in
535 * the DEVTYPE variable.
536 */
537 struct device_type {
538 const char *name;
539 const struct attribute_group **groups;
540 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
541 char *(*devnode)(struct device *dev, umode_t *mode,
542 kuid_t *uid, kgid_t *gid);
543 void (*release)(struct device *dev);

545 const struct dev_pm_ops *pm;
546 };

548 /* interface for exporting device attributes */
549 struct device_attribute {
550 struct attribute attr;
551 ssize_t (*show)(struct device *dev, struct device_attribute *attr,
552 char *buf);
553 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
554 const char *buf, size_t count);
555 };

557 struct dev_ext_attribute {
558 struct device_attribute attr;
559 void *var;
560 };

562 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
563 char *buf);
564 ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
565 const char *buf, size_t count);
566 ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
567 char *buf);
568 ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
569 const char *buf, size_t count);
570 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
571 char *buf);
572 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
573 const char *buf, size_t count);
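/*
 * Editorial sketch: a read-write device attribute using the show/store
 * prototypes above. DEVICE_ATTR_RW() (defined just below) picks up the
 * <name>_show/<name>_store pair by naming convention. "enabled" and
 * struct foo_priv { bool enabled; } are hypothetical.
 */
static ssize_t enabled_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
    struct foo_priv *p = dev_get_drvdata(dev);

    return sprintf(buf, "%d\n", p->enabled);
}

static ssize_t enabled_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
    struct foo_priv *p = dev_get_drvdata(dev);

    if (kstrtobool(buf, &p->enabled))
        return -EINVAL;
    return count;
}
static DEVICE_ATTR_RW(enabled);
/* exported with device_create_file(dev, &dev_attr_enabled); */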
575 #define DEVICE_ATTR(_name, _mode, _show, _store) \
576 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
577 #define DEVICE_ATTR_RW(_name) \
578 struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
579 #define DEVICE_ATTR_RO(_name) \
580 struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
581 #define DEVICE_ATTR_WO(_name) \
582 struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
583 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \
584 struct dev_ext_attribute dev_attr_##_name = \
585 { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
586 #define DEVICE_INT_ATTR(_name, _mode, _var) \
587 struct dev_ext_attribute dev_attr_##_name = \
588 { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
589 #define DEVICE_BOOL_ATTR(_name, _mode, _var) \
590 struct dev_ext_attribute dev_attr_##_name = \
591 { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
592 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
593 struct device_attribute dev_attr_##_name = \
594 __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)

596 extern int device_create_file(struct device *device,
597 const struct device_attribute *entry);
598 extern void device_remove_file(struct device *dev,
599 const struct device_attribute *attr);
600 extern bool device_remove_file_self(struct device *dev,
601 const struct device_attribute *attr);
602 extern int __must_check device_create_bin_file(struct device *dev,
603 const struct bin_attribute *attr);
604 extern void device_remove_bin_file(struct device *dev,
605 const struct bin_attribute *attr);

607 /* device resource management */
608 typedef void (*dr_release_t)(struct device *dev, void *res);
609 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);

611 #ifdef CONFIG_DEBUG_DEVRES
612 extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
613 int nid, const char *name) __malloc;
614 #define devres_alloc(release, size, gfp) \
615 __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
616 #define devres_alloc_node(release, size, gfp, nid) \
617 __devres_alloc_node(release, size, gfp, nid, #release)
618 #else
619 extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
620 int nid) __malloc;
621 static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
622 {
623 return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
624 }
625 #endif

627 extern void devres_for_each_res(struct device *dev, dr_release_t release,
628 dr_match_t match, void *match_data,
629 void (*fn)(struct device *, void *, void *),
630 void *data);
631 extern void devres_free(void *res);
632 extern void devres_add(struct device *dev, void *res);
633 extern void *devres_find(struct device *dev, dr_release_t release,
634 dr_match_t match, void *match_data);
635 extern void *devres_get(struct device *dev, void *new_res,
636 dr_match_t match, void *match_data);
637 extern void *devres_remove(struct device *dev, dr_release_t release,
638 dr_match_t match, void *match_data);
639 extern int devres_destroy(struct device *dev, dr_release_t release,
640 dr_match_t match, void *match_data);
641 extern int devres_release(struct device *dev, dr_release_t release,
642 dr_match_t match, void *match_data);

644 /* devres group */
645 extern void * __must_check devres_open_group(struct device *dev, void *id,
646 gfp_t gfp);
647 extern void devres_close_group(struct device *dev, void *id);
648 extern void devres_remove_group(struct device *dev, void *id);
649 extern int devres_release_group(struct device *dev, void *id);
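/*
 * Editorial sketch: a custom managed resource built from the devres
 * primitives above - devres_alloc() reserves the bookkeeping entry,
 * devres_add() hands it to the device, and the release callback runs
 * automatically when the driver detaches. All "foo" names are
 * hypothetical; ioremap()/iounmap() come from <asm/io.h>.
 */
struct foo_mmio_res {
    void __iomem *base;
};

static void foo_mmio_release(struct device *dev, void *res)
{
    iounmap(((struct foo_mmio_res *)res)->base);
}

static void __iomem *foo_ioremap_managed(struct device *dev,
                                         phys_addr_t phys, size_t len)
{
    struct foo_mmio_res *r;

    r = devres_alloc(foo_mmio_release, sizeof(*r), GFP_KERNEL);
    if (!r)
        return NULL;
    r->base = ioremap(phys, len);
    if (!r->base) {
        devres_free(r);     /* never handed to the device */
        return NULL;
    }
    devres_add(dev, r);     /* released on driver detach */
    return r->base;
}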
651 /* managed devm_k.alloc/kfree for device drivers */
652 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
653 extern __printf(3, 0)
654 char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
655 va_list ap) __malloc;
656 extern __printf(3, 4)
657 char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc;
658 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
659 {
660 return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
661 }
662 static inline void *devm_kmalloc_array(struct device *dev,
663 size_t n, size_t size, gfp_t flags)
664 {
665 if (size != 0 && n > SIZE_MAX / size)
666 return NULL;
667 return devm_kmalloc(dev, n * size, flags);
668 }
669 static inline void *devm_kcalloc(struct device *dev,
670 size_t n, size_t size, gfp_t flags)
671 {
672 return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
673 }
674 extern void devm_kfree(struct device *dev, void *p);
675 extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
676 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
677 gfp_t gfp);

679 extern unsigned long devm_get_free_pages(struct device *dev,
680 gfp_t gfp_mask, unsigned int order);
681 extern void devm_free_pages(struct device *dev, unsigned long addr);

683 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);

685 /* allows adding/removing a custom action to the devres stack */
686 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
687 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);

689 static inline int devm_add_action_or_reset(struct device *dev,
690 void (*action)(void *), void *data)
691 {
692 int ret;

694 ret = devm_add_action(dev, action, data);
695 if (ret)
696 action(data);

698 return ret;
699 }

701 struct device_dma_parameters {
702 /*
703 * a low level driver may set these to teach IOMMU code about
704 * sg limitations.
705 */
706 unsigned int max_segment_size;
707 unsigned long segment_boundary_mask;
708 };
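/*
 * Editorial sketch: typical devm_* usage in a probe routine - allocations
 * are freed automatically when probe fails or the device is unbound, and
 * devm_add_action_or_reset() queues (or, on failure, immediately runs) a
 * custom undo step. foo_priv and foo_hw_quiesce are hypothetical.
 */
static void foo_hw_quiesce(void *data)
{
    /* e.g. mask interrupts, stop DMA on the hypothetical hardware */
}

static int foo_probe(struct device *dev)
{
    struct foo_priv *p;

    p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
    if (!p)
        return -ENOMEM;

    p->label = devm_kasprintf(dev, GFP_KERNEL, "foo-%s", dev_name(dev));
    if (!p->label)
        return -ENOMEM;

    return devm_add_action_or_reset(dev, foo_hw_quiesce, p);
}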
710 /**
711 * struct device - The basic device structure
712 * @parent: The device's "parent" device, the device to which it is attached.
713 * In most cases, a parent device is some sort of bus or host
714 * controller. If parent is NULL, the device is a top-level device,
715 * which is not usually what you want.
716 * @p: Holds the private data of the driver core portions of the device.
717 * See the comment of the struct device_private for detail.
718 * @kobj: A top-level, abstract class from which other classes are derived.
719 * @init_name: Initial name of the device.
720 * @type: The type of device.
721 * This identifies the device type and carries type-specific
722 * information.
723 * @mutex: Mutex to synchronize calls to its driver.
724 * @bus: Type of bus device is on.
725 * @driver: Which driver has allocated this
726 * @platform_data: Platform data specific to the device.
727 * Example: For devices on custom boards, as typical of embedded
728 * and SOC based hardware, Linux often uses platform_data to point
729 * to board-specific structures describing devices and how they
730 * are wired. That can include what ports are available, chip
731 * variants, which GPIO pins act in what additional roles, and so
732 * on. This shrinks the "Board Support Packages" (BSPs) and
733 * minimizes board-specific #ifdefs in drivers.
734 * @driver_data: Private pointer for driver specific info.
735 * @power: For device power management.
736 * See Documentation/power/devices.txt for details.
737 * @pm_domain: Provide callbacks that are executed during system suspend,
738 * hibernation, system resume and during runtime PM transitions
739 * along with subsystem-level and driver-level callbacks.
740 * @pins: For device pin management.
741 * See Documentation/pinctrl.txt for details.
742 * @msi_list: Hosts MSI descriptors
743 * @msi_domain: The generic MSI domain this device is using.
744 * @numa_node: NUMA node this device is close to.
745 * @dma_mask: Dma mask (if dma'ble device).
746 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mappings as not all
747 * hardware supports 64-bit addresses for consistent allocations
748 * of such descriptors.
749 * @dma_pfn_offset: offset of the DMA memory range relative to RAM
750 * @dma_parms: A low level driver may set these to teach IOMMU code about
751 * segment limitations.
752 * @dma_pools: Dma pools (if dma'ble device).
753 * @dma_mem: Internal for coherent mem override.
754 * @cma_area: Contiguous memory area for dma allocations
755 * @archdata: For arch-specific additions.
756 * @of_node: Associated device tree node.
757 * @fwnode: Associated device node supplied by platform firmware.
758 * @devt: For creating the sysfs "dev".
759 * @id: device instance
760 * @devres_lock: Spinlock to protect the resource of the device.
761 * @devres_head: The resources list of the device.
762 * @knode_class: The node used to add the device to the class list.
763 * @class: The class of the device.
764 * @groups: Optional attribute groups.
765 * @release: Callback to free the device after all references have
766 * gone away. This should be set by the allocator of the
767 * device (i.e. the bus driver that discovered the device).
768 * @iommu_group: IOMMU group the device belongs to.
769 * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
770 *
771 * @offline_disabled: If set, the device is permanently online.
772 * @offline: Set after successful invocation of bus type's .offline().
773 *
774 * At the lowest level, every device in a Linux system is represented by an
775 * instance of struct device. The device structure contains the information
776 * that the device model core needs to model the system. Most subsystems,
777 * however, track additional information about the devices they host. As a
778 * result, it is rare for devices to be represented by bare device structures;
779 * instead, that structure, like kobject structures, is usually embedded within
780 * a higher-level representation of the device.
781 */
782 struct device {
783 struct device *parent;

785 struct device_private *p;

787 struct kobject kobj;
788 const char *init_name; /* initial name of the device */
789 const struct device_type *type;

791 struct mutex mutex; /* mutex to synchronize calls to
792 * its driver.
793 */

795 struct bus_type *bus; /* type of bus device is on */
796 struct device_driver *driver; /* which driver has allocated this
797 device */
798 void *platform_data; /* Platform specific data, device
799 core doesn't touch it */
800 void *driver_data; /* Driver data, set and get with
801 dev_set/get_drvdata */
802 struct dev_pm_info power;
803 struct dev_pm_domain *pm_domain;

805 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
806 struct irq_domain *msi_domain;
807 #endif
808 #ifdef CONFIG_PINCTRL
809 struct dev_pin_info *pins;
810 #endif
811 #ifdef CONFIG_GENERIC_MSI_IRQ
812 struct list_head msi_list;
813 #endif

815 #ifdef CONFIG_NUMA
816 int numa_node; /* NUMA node this device is close to */
817 #endif
818 u64 *dma_mask; /* dma mask (if dma'able device) */
819 u64 coherent_dma_mask; /* Like dma_mask, but for
820 alloc_coherent mappings as
821 not all hardware supports
822 64 bit addresses for consistent
823 allocations of such descriptors. */
824 unsigned long dma_pfn_offset;

826 struct device_dma_parameters *dma_parms;

828 struct list_head dma_pools; /* dma pools (if dma'ble) */

830 struct dma_coherent_mem *dma_mem; /* internal for coherent mem
831 override */
832 #ifdef CONFIG_DMA_CMA
833 struct cma *cma_area; /* contiguous memory area for dma
834 allocations */
835 #endif
836 /* arch specific additions */
837 struct dev_archdata archdata;

839 struct device_node *of_node; /* associated device tree node */
840 struct fwnode_handle *fwnode; /* firmware device node */

842 dev_t devt; /* dev_t, creates the sysfs "dev" */
843 u32 id; /* device instance */

845 spinlock_t devres_lock;
846 struct list_head devres_head;

848 struct klist_node knode_class;
849 struct class *class;
850 const struct attribute_group **groups; /* optional groups */

852 void (*release)(struct device *dev);
853 struct iommu_group *iommu_group;
854 struct iommu_fwspec *iommu_fwspec;

856 bool offline_disabled:1;
857 bool offline:1;
858 };
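/*
 * Editorial sketch: embedding struct device in a higher-level object, as
 * the comment above describes. The release callback frees the containing
 * structure once the last reference is dropped; device_initialize(),
 * device_add(), dev_set_name() and put_device() are declared further
 * below in this header. The "widget" names are hypothetical.
 */
struct foo_widget {
    struct device dev;
    int id;
};
#define to_foo_widget(d) container_of(d, struct foo_widget, dev)

static void foo_widget_release(struct device *dev)
{
    kfree(to_foo_widget(dev));      /* refcount hit zero */
}

static int foo_widget_add(struct foo_widget *w, struct device *parent)
{
    int err;

    device_initialize(&w->dev);
    w->dev.parent  = parent;
    w->dev.release = foo_widget_release;
    dev_set_name(&w->dev, "widget%d", w->id);
    err = device_add(&w->dev);
    if (err)
        put_device(&w->dev);        /* release() frees the widget */
    return err;
}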
860 static inline struct device *kobj_to_dev(struct kobject *kobj)
861 {
862 return container_of(kobj, struct device, kobj);
863 }

865 /* Get the wakeup routines, which depend on struct device */
866 #include <linux/pm_wakeup.h>

868 static inline const char *dev_name(const struct device *dev)
869 {
870 /* Use the init name until the kobject becomes available */
871 if (dev->init_name)
872 return dev->init_name;

874 return kobject_name(&dev->kobj);
875 }

877 extern __printf(2, 3)
878 int dev_set_name(struct device *dev, const char *name, ...);

880 #ifdef CONFIG_NUMA
881 static inline int dev_to_node(struct device *dev)
882 {
883 return dev->numa_node;
884 }
885 static inline void set_dev_node(struct device *dev, int node)
886 {
887 dev->numa_node = node;
888 }
889 #else
890 static inline int dev_to_node(struct device *dev)
891 {
892 return -1;
893 }
894 static inline void set_dev_node(struct device *dev, int node)
895 {
896 }
897 #endif

899 static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
900 {
901 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
902 return dev->msi_domain;
903 #else
904 return NULL;
905 #endif
906 }

908 static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
909 {
910 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
911 dev->msi_domain = d;
912 #endif
913 }

915 static inline void *dev_get_drvdata(const struct device *dev)
916 {
917 return dev->driver_data;
918 }

920 static inline void dev_set_drvdata(struct device *dev, void *data)
921 {
922 dev->driver_data = data;
923 }
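/*
 * Editorial sketch: the usual dev_set_drvdata()/dev_get_drvdata() pairing -
 * state established at probe time is recovered in every other callback.
 * baz_priv and the baz_* functions are hypothetical.
 */
static int baz_probe(struct device *dev)
{
    struct baz_priv *p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);

    if (!p)
        return -ENOMEM;
    dev_set_drvdata(dev, p);
    return 0;
}

static int baz_remove(struct device *dev)
{
    struct baz_priv *p = dev_get_drvdata(dev);

    /* tear down hardware state recorded in p ... */
    return 0;
}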
925 static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
926 {
927 return dev ? dev->power.subsys_data : NULL;
928 }

930 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
931 {
932 return dev->kobj.uevent_suppress;
933 }

935 static inline void dev_set_uevent_suppress(struct device *dev, int val)
936 {
937 dev->kobj.uevent_suppress = val;
938 }

940 static inline int device_is_registered(struct device *dev)
941 {
942 return dev->kobj.state_in_sysfs;
943 }

945 static inline void device_enable_async_suspend(struct device *dev)
946 {
947 if (!dev->power.is_prepared)
948 dev->power.async_suspend = true;
949 }

951 static inline void device_disable_async_suspend(struct device *dev)
952 {
953 if (!dev->power.is_prepared)
954 dev->power.async_suspend = false;
955 }

957 static inline bool device_async_suspend_enabled(struct device *dev)
958 {
959 return !!dev->power.async_suspend;
960 }

962 static inline void dev_pm_syscore_device(struct device *dev, bool val)
963 {
964 #ifdef CONFIG_PM_SLEEP
965 dev->power.syscore = val;
966 #endif
967 }

969 static inline void device_lock(struct device *dev)
970 {
971 mutex_lock(&dev->mutex);
972 }

974 static inline int device_lock_interruptible(struct device *dev)
975 {
976 return mutex_lock_interruptible(&dev->mutex);
977 }

979 static inline int device_trylock(struct device *dev)
980 {
981 return mutex_trylock(&dev->mutex);
982 }

984 static inline void device_unlock(struct device *dev)
985 {
986 mutex_unlock(&dev->mutex);
987 }

989 static inline void device_lock_assert(struct device *dev)
990 {
991 lockdep_assert_held(&dev->mutex);
992 }

994 static inline struct device_node *dev_of_node(struct device *dev)
995 {
996 if (!IS_ENABLED(CONFIG_OF))
997 return NULL;
998 return dev->of_node;
999 }

1001 void driver_init(void);

1003 /*
1004 * High level routines for use by the bus drivers
1005 */
1006 extern int __must_check device_register(struct device *dev);
1007 extern void device_unregister(struct device *dev);
1008 extern void device_initialize(struct device *dev);
1009 extern int __must_check device_add(struct device *dev);
1010 extern void device_del(struct device *dev);
1011 extern int device_for_each_child(struct device *dev, void *data,
1012 int (*fn)(struct device *dev, void *data));
1013 extern int device_for_each_child_reverse(struct device *dev, void *data,
1014 int (*fn)(struct device *dev, void *data));
1015 extern struct device *device_find_child(struct device *dev, void *data,
1016 int (*match)(struct device *dev, void *data));
1017 extern int device_rename(struct device *dev, const char *new_name);
1018 extern int device_move(struct device *dev, struct device *new_parent,
1019 enum dpm_order dpm_order);
1020 extern const char *device_get_devnode(struct device *dev,
1021 umode_t *mode, kuid_t *uid, kgid_t *gid,
1022 const char **tmp);

1024 static inline bool device_supports_offline(struct device *dev)
1025 {
1026 return dev->bus && dev->bus->offline && dev->bus->online;
1027 }

1029 extern void lock_device_hotplug(void);
1030 extern void unlock_device_hotplug(void);
1031 extern int lock_device_hotplug_sysfs(void);
1032 extern int device_offline(struct device *dev);
1033 extern int device_online(struct device *dev);
1034 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
1035 extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
1037 /*
1038 * Root device objects for grouping under /sys/devices
1039 */
1040 extern struct device *__root_device_register(const char *name,
1041 struct module *owner);

1043 /* This is a macro to avoid include problems with THIS_MODULE */
1044 #define root_device_register(name) \
1045 __root_device_register(name, THIS_MODULE)

1047 extern void root_device_unregister(struct device *root);

1049 static inline void *dev_get_platdata(const struct device *dev)
1050 {
1051 return dev->platform_data;
1052 }

1054 /*
1055 * Manual binding of a device to driver. See drivers/base/bus.c
1056 * for information on use.
1057 */
1058 extern int __must_check device_bind_driver(struct device *dev);
1059 extern void device_release_driver(struct device *dev);
1060 extern int __must_check device_attach(struct device *dev);
1061 extern int __must_check driver_attach(struct device_driver *drv);
1062 extern void device_initial_probe(struct device *dev);
1063 extern int __must_check device_reprobe(struct device *dev);

1065 extern bool device_is_bound(struct device *dev);

1067 /*
1068 * Easy functions for dynamically creating devices on the fly
1069 */
1070 extern __printf(5, 0)
1071 struct device *device_create_vargs(struct class *cls, struct device *parent,
1072 dev_t devt, void *drvdata,
1073 const char *fmt, va_list vargs);
1074 extern __printf(5, 6)
1075 struct device *device_create(struct class *cls, struct device *parent,
1076 dev_t devt, void *drvdata,
1077 const char *fmt, ...);
1078 extern __printf(6, 7)
1079 struct device *device_create_with_groups(struct class *cls,
1080 struct device *parent, dev_t devt, void *drvdata,
1081 const struct attribute_group **groups,
1082 const char *fmt, ...);
1083 extern void device_destroy(struct class *cls, dev_t devt);

1085 /*
1086 * Platform "fixup" functions - allow the platform to have their say
1087 * about devices and actions that the general device layer doesn't
1088 * know about.
1089 */
1090 /* Notify platform of device discovery */
1091 extern int (*platform_notify)(struct device *dev);

1093 extern int (*platform_notify_remove)(struct device *dev);

1096 /*
1097 * get_device - atomically increment the reference count for the device.
1098 *
1099 */
1100 extern struct device *get_device(struct device *dev);
1101 extern void put_device(struct device *dev);

1103 #ifdef CONFIG_DEVTMPFS
1104 extern int devtmpfs_create_node(struct device *dev);
1105 extern int devtmpfs_delete_node(struct device *dev);
1106 extern int devtmpfs_mount(const char *mntdir);
1107 #else
1108 static inline int devtmpfs_create_node(struct device *dev) { return 0; }
1109 static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
1110 static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
1111 #endif

1113 /* drivers/base/power/shutdown.c */
1114 extern void device_shutdown(void);
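/*
 * Editorial sketch: device_create()/device_destroy() around a char-device
 * node. MKDEV() comes from <linux/kdev_t.h>; foo_class and foo_major are
 * hypothetical. device_create() returns ERR_PTR() on failure.
 */
static struct device *foo_add_node(struct device *parent, void *drvdata)
{
    struct device *d;

    d = device_create(&foo_class, parent, MKDEV(foo_major, 0),
                      drvdata, "foo%d", 0);
    if (IS_ERR(d))
        return NULL;        /* or propagate PTR_ERR(d) */
    return d;
}
/* teardown: device_destroy(&foo_class, MKDEV(foo_major, 0)); */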
1116 /* debugging and troubleshooting/diagnostic helpers. */
1117 extern const char *dev_driver_string(const struct device *dev);

1120 #ifdef CONFIG_PRINTK

1122 extern __printf(3, 0)
1123 int dev_vprintk_emit(int level, const struct device *dev,
1124 const char *fmt, va_list args);
1125 extern __printf(3, 4)
1126 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);

1128 extern __printf(3, 4)
1129 void dev_printk(const char *level, const struct device *dev,
1130 const char *fmt, ...);
1131 extern __printf(2, 3)
1132 void dev_emerg(const struct device *dev, const char *fmt, ...);
1133 extern __printf(2, 3)
1134 void dev_alert(const struct device *dev, const char *fmt, ...);
1135 extern __printf(2, 3)
1136 void dev_crit(const struct device *dev, const char *fmt, ...);
1137 extern __printf(2, 3)
1138 void dev_err(const struct device *dev, const char *fmt, ...);
1139 extern __printf(2, 3)
1140 void dev_warn(const struct device *dev, const char *fmt, ...);
1141 extern __printf(2, 3)
1142 void dev_notice(const struct device *dev, const char *fmt, ...);
1143 extern __printf(2, 3)
1144 void _dev_info(const struct device *dev, const char *fmt, ...);

1146 #else

1148 static inline __printf(3, 0)
1149 int dev_vprintk_emit(int level, const struct device *dev,
1150 const char *fmt, va_list args)
1151 { return 0; }
1152 static inline __printf(3, 4)
1153 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
1154 { return 0; }

1156 static inline void __dev_printk(const char *level, const struct device *dev,
1157 struct va_format *vaf)
1158 {}
1159 static inline __printf(3, 4)
1160 void dev_printk(const char *level, const struct device *dev,
1161 const char *fmt, ...)
1162 {}

1164 static inline __printf(2, 3)
1165 void dev_emerg(const struct device *dev, const char *fmt, ...)
1166 {}
1167 static inline __printf(2, 3)
1168 void dev_crit(const struct device *dev, const char *fmt, ...)
1169 {}
1170 static inline __printf(2, 3)
1171 void dev_alert(const struct device *dev, const char *fmt, ...)
1172 {}
1173 static inline __printf(2, 3)
1174 void dev_err(const struct device *dev, const char *fmt, ...)
1175 {}
1176 static inline __printf(2, 3)
1177 void dev_warn(const struct device *dev, const char *fmt, ...)
1178 {}
1179 static inline __printf(2, 3)
1180 void dev_notice(const struct device *dev, const char *fmt, ...)
1181 {}
1182 static inline __printf(2, 3)
1183 void _dev_info(const struct device *dev, const char *fmt, ...)
1184 {}

1186 #endif

1188 /*
1189 * Stupid hackaround for existing uses of non-printk dev_info
1190 *
1191 * Note that the definition of dev_info below is actually _dev_info
1192 * and a macro is used to avoid redefining dev_info
1193 */

1195 #define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)

1197 #if defined(CONFIG_DYNAMIC_DEBUG)
1198 #define dev_dbg(dev, format, ...) \
1199 do { \
1200 dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
1201 } while (0)
1202 #elif defined(DEBUG)
1203 #define dev_dbg(dev, format, arg...) \
1204 dev_printk(KERN_DEBUG, dev, format, ##arg)
1205 #else
1206 #define dev_dbg(dev, format, arg...) \
1207 ({ \
1208 if (0) \
1209 dev_printk(KERN_DEBUG, dev, format, ##arg); \
1210 })
1211 #endif
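/*
 * Editorial sketch: the dev_*() helpers prefix every message with the
 * driver and device name, so the format strings stay short at all levels;
 * dev_dbg() compiles away unless DEBUG or CONFIG_DYNAMIC_DEBUG is set.
 * foo_start() and its transfer result are hypothetical.
 */
static int foo_start(struct device *dev, unsigned int len)
{
    int ret = 0; /* stand-in for a real transfer result */

    dev_dbg(dev, "starting transfer of %u bytes\n", len);
    if (ret) {
        dev_err(dev, "transfer failed: %d\n", ret);
        return ret;
    }
    dev_info(dev, "transfer complete\n");
    return 0;
}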
1213 #ifdef CONFIG_PRINTK
1214 #define dev_level_once(dev_level, dev, fmt, ...) \
1215 do { \
1216 static bool __print_once __read_mostly; \
1217 \
1218 if (!__print_once) { \
1219 __print_once = true; \
1220 dev_level(dev, fmt, ##__VA_ARGS__); \
1221 } \
1222 } while (0)
1223 #else
1224 #define dev_level_once(dev_level, dev, fmt, ...) \
1225 do { \
1226 if (0) \
1227 dev_level(dev, fmt, ##__VA_ARGS__); \
1228 } while (0)
1229 #endif

1231 #define dev_emerg_once(dev, fmt, ...) \
1232 dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
1233 #define dev_alert_once(dev, fmt, ...) \
1234 dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
1235 #define dev_crit_once(dev, fmt, ...) \
1236 dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
1237 #define dev_err_once(dev, fmt, ...) \
1238 dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
1239 #define dev_warn_once(dev, fmt, ...) \
1240 dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
1241 #define dev_notice_once(dev, fmt, ...) \
1242 dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
1243 #define dev_info_once(dev, fmt, ...) \
1244 dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
1245 #define dev_dbg_once(dev, fmt, ...) \
1246 dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)

1248 #define dev_level_ratelimited(dev_level, dev, fmt, ...) \
1249 do { \
1250 static DEFINE_RATELIMIT_STATE(_rs, \
1251 DEFAULT_RATELIMIT_INTERVAL, \
1252 DEFAULT_RATELIMIT_BURST); \
1253 if (__ratelimit(&_rs)) \
1254 dev_level(dev, fmt, ##__VA_ARGS__); \
1255 } while (0)

1257 #define dev_emerg_ratelimited(dev, fmt, ...) \
1258 dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
1259 #define dev_alert_ratelimited(dev, fmt, ...) \
1260 dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
1261 #define dev_crit_ratelimited(dev, fmt, ...) \
1262 dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
1263 #define dev_err_ratelimited(dev, fmt, ...) \
1264 dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
1265 #define dev_warn_ratelimited(dev, fmt, ...) \
1266 dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
1267 #define dev_notice_ratelimited(dev, fmt, ...) \
1268 dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
1269 #define dev_info_ratelimited(dev, fmt, ...) \
1270 dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
1271 #if defined(CONFIG_DYNAMIC_DEBUG)
1272 /* descriptor check is first to prevent flooding with "callbacks suppressed" */
1273 #define dev_dbg_ratelimited(dev, fmt, ...) \
1274 do { \
1275 static DEFINE_RATELIMIT_STATE(_rs, \
1276 DEFAULT_RATELIMIT_INTERVAL, \
1277 DEFAULT_RATELIMIT_BURST); \
1278 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
1279 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
1280 __ratelimit(&_rs)) \
1281 __dynamic_dev_dbg(&descriptor, dev, fmt, \
1282 ##__VA_ARGS__); \
1283 } while (0)
1284 #elif defined(DEBUG)
1285 #define dev_dbg_ratelimited(dev, fmt, ...) \
1286 do { \
1287 static DEFINE_RATELIMIT_STATE(_rs, \
1288 DEFAULT_RATELIMIT_INTERVAL, \
1289 DEFAULT_RATELIMIT_BURST); \
1290 if (__ratelimit(&_rs)) \
1291 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
1292 } while (0)
1293 #else
1294 #define dev_dbg_ratelimited(dev, fmt, ...) \
1295 do { \
1296 if (0) \
1297 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
1298 } while (0)
1299 #endif
1301 #ifdef VERBOSE_DEBUG
1302 #define dev_vdbg dev_dbg
1303 #else
1304 #define dev_vdbg(dev, format, arg...) \
1305 ({ \
1306 if (0) \
1307 dev_printk(KERN_DEBUG, dev, format, ##arg); \
1308 })
1309 #endif

1311 /*
1312 * dev_WARN*() acts like dev_printk(), but with the key difference of
1313 * using WARN/WARN_ONCE to include file/line information and a backtrace.
1314 */
1315 #define dev_WARN(dev, format, arg...) \
1316 WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)

1318 #define dev_WARN_ONCE(dev, condition, format, arg...) \
1319 WARN_ONCE(condition, "%s %s: " format, \
1320 dev_driver_string(dev), dev_name(dev), ## arg)

1322 /* Create alias, so I can be autoloaded. */
1323 #define MODULE_ALIAS_CHARDEV(major,minor) \
1324 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
1325 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \
1326 MODULE_ALIAS("char-major-" __stringify(major) "-*")

1328 #ifdef CONFIG_SYSFS_DEPRECATED
1329 extern long sysfs_deprecated;
1330 #else
1331 #define sysfs_deprecated 0
1332 #endif

1334 /**
1335 * module_driver() - Helper macro for drivers that don't do anything
1336 * special in module init/exit. This eliminates a lot of boilerplate.
1337 * Each module may only use this macro once, and calling it replaces
1338 * module_init() and module_exit().
1339 *
1340 * @__driver: driver name
1341 * @__register: register function for this driver type
1342 * @__unregister: unregister function for this driver type
1343 * @...: Additional arguments to be passed to __register and __unregister.
1344 *
1345 * Use this macro to construct bus specific macros for registering
1346 * drivers, and do not use it on its own.
1347 */
1348 #define module_driver(__driver, __register, __unregister, ...) \
1349 static int __init __driver##_init(void) \
1350 { \
1351 return __register(&(__driver) , ##__VA_ARGS__); \
1352 } \
1353 module_init(__driver##_init); \
1354 static void __exit __driver##_exit(void) \
1355 { \
1356 __unregister(&(__driver) , ##__VA_ARGS__); \
1357 } \
1358 module_exit(__driver##_exit);

1360 /**
1361 * builtin_driver() - Helper macro for drivers that don't do anything
1362 * special in init and have no exit. This eliminates some boilerplate.
1363 * Each driver may only use this macro once, and calling it replaces
1364 * device_initcall (or in some cases, the legacy __initcall). This is
1365 * meant to be a direct parallel of module_driver() above but without
1366 * the __exit stuff that is not used for builtin cases.
1367 *
1368 * @__driver: driver name
1369 * @__register: register function for this driver type
1370 * @...: Additional arguments to be passed to __register
1371 *
1372 * Use this macro to construct bus specific macros for registering
1373 * drivers, and do not use it on its own.
1374 */
1375 #define builtin_driver(__driver, __register, ...) \
1376 static int __init __driver##_init(void) \
1377 { \
1378 return __register(&(__driver) , ##__VA_ARGS__); \
1379 } \
1380 device_initcall(__driver##_init);

1382 #endif /* _DEVICE_H_ */
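/*
 * Editorial sketch: how a bus layer typically wraps module_driver(), in
 * the spirit of the "construct bus specific macros" note above. The foo
 * names mirror real helpers such as module_pci_driver() but are
 * hypothetical here.
 */
#define module_foo_driver(__foo_driver) \
    module_driver(__foo_driver, foo_driver_register, foo_driver_unregister)

/* a driver module then reduces to:
 *
 *     static struct foo_driver my_driver = { ... };
 *     module_foo_driver(my_driver);
 */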
1 #ifndef _LINUX_DMA_MAPPING_H
2 #define _LINUX_DMA_MAPPING_H

4 #include <linux/sizes.h>
5 #include <linux/string.h>
6 #include <linux/device.h>
7 #include <linux/err.h>
8 #include <linux/dma-debug.h>
9 #include <linux/dma-direction.h>
10 #include <linux/scatterlist.h>
11 #include <linux/kmemcheck.h>
12 #include <linux/bug.h>

14 /**
15 * List of possible attributes associated with a DMA mapping. The semantics
16 * of each attribute should be defined in Documentation/DMA-attributes.txt.
17 *
18 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
19 * forces all pending DMA writes to complete.
20 */
21 #define DMA_ATTR_WRITE_BARRIER (1UL << 0)
22 /*
23 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
24 * may be weakly ordered, that is that reads and writes may pass each other.
25 */
26 #define DMA_ATTR_WEAK_ORDERING (1UL << 1)
27 /*
28 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
29 * buffered to improve performance.
30 */
31 #define DMA_ATTR_WRITE_COMBINE (1UL << 2)
32 /*
33 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
34 * consistent or non-consistent memory as it sees fit.
35 */
36 #define DMA_ATTR_NON_CONSISTENT (1UL << 3)
37 /*
38 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
39 * virtual mapping for the allocated buffer.
40 */
41 #define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
42 /*
43 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
44 * the CPU cache for the given buffer assuming that it has been already
45 * transferred to 'device' domain.
46 */
47 #define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
48 /*
49 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
50 * in physical memory.
51 */
52 #define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
53 /*
54 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
55 * that it's probably not worth the time to try to allocate memory in a way
56 * that gives better TLB efficiency.
57 */
58 #define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
59 /*
60 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
61 * allocation failure reports (similarly to __GFP_NOWARN).
62 */
63 #define DMA_ATTR_NO_WARN (1UL << 8)
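/*
 * Editorial sketch: attribute bits are ORed into the attrs argument of the
 * *_attrs DMA calls (dma_alloc_attrs()/dma_free_attrs() appear later in
 * this header). The same attrs value must be used for free as for alloc;
 * foo_alloc_wc() is hypothetical.
 */
static void *foo_alloc_wc(struct device *dev, size_t size,
                          dma_addr_t *handle)
{
    return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
                           DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
}
/* and later:
 * dma_free_attrs(dev, size, cpu_addr, *handle,
 *                DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 */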
65 /*
66 * A dma_addr_t can hold any valid DMA or bus address for the platform.
67 * It can be given to a device to use as a DMA source or target. A CPU cannot
68 * reference a dma_addr_t directly because there may be translation between
69 * its physical address space and the bus address space.
70 */
71 struct dma_map_ops {
72 void* (*alloc)(struct device *dev, size_t size,
73 dma_addr_t *dma_handle, gfp_t gfp,
74 unsigned long attrs);
75 void (*free)(struct device *dev, size_t size,
76 void *vaddr, dma_addr_t dma_handle,
77 unsigned long attrs);
78 int (*mmap)(struct device *, struct vm_area_struct *,
79 void *, dma_addr_t, size_t,
80 unsigned long attrs);

82 int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
83 dma_addr_t, size_t, unsigned long attrs);

85 dma_addr_t (*map_page)(struct device *dev, struct page *page,
86 unsigned long offset, size_t size,
87 enum dma_data_direction dir,
88 unsigned long attrs);
89 void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
90 size_t size, enum dma_data_direction dir,
91 unsigned long attrs);
92 /*
93 * map_sg returns 0 on error and a value > 0 on success.
94 * It should never return a value < 0.
95 */
96 int (*map_sg)(struct device *dev, struct scatterlist *sg,
97 int nents, enum dma_data_direction dir,
98 unsigned long attrs);
99 void (*unmap_sg)(struct device *dev,
100 struct scatterlist *sg, int nents,
101 enum dma_data_direction dir,
102 unsigned long attrs);
103 dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
104 size_t size, enum dma_data_direction dir,
105 unsigned long attrs);
106 void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
107 size_t size, enum dma_data_direction dir,
108 unsigned long attrs);
109 void (*sync_single_for_cpu)(struct device *dev,
110 dma_addr_t dma_handle, size_t size,
111 enum dma_data_direction dir);
112 void (*sync_single_for_device)(struct device *dev,
113 dma_addr_t dma_handle, size_t size,
114 enum dma_data_direction dir);
115 void (*sync_sg_for_cpu)(struct device *dev,
116 struct scatterlist *sg, int nents,
117 enum dma_data_direction dir);
118 void (*sync_sg_for_device)(struct device *dev,
119 struct scatterlist *sg, int nents,
120 enum dma_data_direction dir);
121 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
122 int (*dma_supported)(struct device *dev, u64 mask);
123 int (*set_dma_mask)(struct device *dev, u64 mask);
124 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
125 u64 (*get_required_mask)(struct device *dev);
126 #endif
127 int is_phys;
128 };

130 extern struct dma_map_ops dma_noop_ops;

132 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

134 #define DMA_MASK_NONE 0x0ULL

136 static inline int valid_dma_direction(int dma_direction)
137 {
138 return ((dma_direction == DMA_BIDIRECTIONAL) ||
139 (dma_direction == DMA_TO_DEVICE) ||
140 (dma_direction == DMA_FROM_DEVICE));
141 }

143 static inline int is_device_dma_capable(struct device *dev)
144 {
145 return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
146 }

148 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
149 /*
150 * These three functions are only for the DMA allocator.
151 * Don't use them in device drivers.
152 */
153 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
154 dma_addr_t *dma_handle, void **ret);
155 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

157 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
158 void *cpu_addr, size_t size, int *ret);
159 #else
160 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
161 #define dma_release_from_coherent(dev, order, vaddr) (0)
162 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
163 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
165 #ifdef CONFIG_HAS_DMA
166 #include <asm/dma-mapping.h>
167 #else
168 /*
169 * Define the dma api to allow compilation but not linking of
170 * dma dependent code. Code that depends on the dma-mapping
171 * API needs to set 'depends on HAS_DMA' in its Kconfig
172 */
173 extern struct dma_map_ops bad_dma_ops;
174 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
175 {
176 return &bad_dma_ops;
177 }
178 #endif

180 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
181 size_t size,
182 enum dma_data_direction dir,
183 unsigned long attrs)
184 {
185 struct dma_map_ops *ops = get_dma_ops(dev);
186 dma_addr_t addr;

188 kmemcheck_mark_initialized(ptr, size);
189 BUG_ON(!valid_dma_direction(dir));
190 addr = ops->map_page(dev, virt_to_page(ptr),
191 offset_in_page(ptr), size,
192 dir, attrs);
193 debug_dma_map_page(dev, virt_to_page(ptr),
194 offset_in_page(ptr), size,
195 dir, addr, true);
196 return addr;
197 }

199 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
200 size_t size,
201 enum dma_data_direction dir,
202 unsigned long attrs)
203 {
204 struct dma_map_ops *ops = get_dma_ops(dev);

206 BUG_ON(!valid_dma_direction(dir));
207 if (ops->unmap_page)
208 ops->unmap_page(dev, addr, size, dir, attrs);
209 debug_dma_unmap_page(dev, addr, size, dir, true);
210 }

212 /*
213 * dma_map_sg_attrs returns 0 on error and > 0 on success.
214 * It should never return a value < 0.
215 */
216 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
217 int nents, enum dma_data_direction dir,
218 unsigned long attrs)
219 {
220 struct dma_map_ops *ops = get_dma_ops(dev);
221 int i, ents;
222 struct scatterlist *s;

224 for_each_sg(sg, s, nents, i)
225 kmemcheck_mark_initialized(sg_virt(s), s->length);
226 BUG_ON(!valid_dma_direction(dir));
227 ents = ops->map_sg(dev, sg, nents, dir, attrs);
228 BUG_ON(ents < 0);
229 debug_dma_map_sg(dev, sg, nents, ents, dir);

231 return ents;
232 }

234 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
235 int nents, enum dma_data_direction dir,
236 unsigned long attrs)
237 {
238 struct dma_map_ops *ops = get_dma_ops(dev);

240 BUG_ON(!valid_dma_direction(dir));
241 debug_dma_unmap_sg(dev, sg, nents, dir);
242 if (ops->unmap_sg)
243 ops->unmap_sg(dev, sg, nents, dir, attrs);
244 }

246 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
247 size_t offset, size_t size,
248 enum dma_data_direction dir)
249 {
250 struct dma_map_ops *ops = get_dma_ops(dev);
251 dma_addr_t addr;

253 kmemcheck_mark_initialized(page_address(page) + offset, size);
254 BUG_ON(!valid_dma_direction(dir));
255 addr = ops->map_page(dev, page, offset, size, dir, 0);
256 debug_dma_map_page(dev, page, offset, size, dir, addr, false);

258 return addr;
259 }

261 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
262 size_t size, enum dma_data_direction dir)
263 {
264 struct dma_map_ops *ops = get_dma_ops(dev);

266 BUG_ON(!valid_dma_direction(dir));
267 if (ops->unmap_page)
268 ops->unmap_page(dev, addr, size, dir, 0);
269 debug_dma_unmap_page(dev, addr, size, dir, false);
270 }
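/*
 * Editorial sketch: a one-shot streaming mapping using the wrappers above.
 * dma_map_single()/dma_unmap_single() are the attrs-less macros defined
 * further below, and every mapping must be checked with
 * dma_mapping_error() (also defined later in this header) before use.
 * foo_send() is hypothetical.
 */
static int foo_send(struct device *dev, void *buf, size_t len)
{
    dma_addr_t handle;

    handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, handle))
        return -ENOMEM;

    /* ... point the hypothetical hardware at "handle" ... */

    dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
    return 0;
}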
272 static inline dma_addr_t dma_map_resource(struct device *dev,
273 phys_addr_t phys_addr,
274 size_t size,
275 enum dma_data_direction dir,
276 unsigned long attrs)
277 {
278 struct dma_map_ops *ops = get_dma_ops(dev);
279 dma_addr_t addr;

281 BUG_ON(!valid_dma_direction(dir));

283 /* Don't allow RAM to be mapped */
284 BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

286 addr = phys_addr;
287 if (ops->map_resource)
288 addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

290 debug_dma_map_resource(dev, phys_addr, size, dir, addr);

292 return addr;
293 }

295 static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
296 size_t size, enum dma_data_direction dir,
297 unsigned long attrs)
298 {
299 struct dma_map_ops *ops = get_dma_ops(dev);

301 BUG_ON(!valid_dma_direction(dir));
302 if (ops->unmap_resource)
303 ops->unmap_resource(dev, addr, size, dir, attrs);
304 debug_dma_unmap_resource(dev, addr, size, dir);
305 }

307 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
308 size_t size,
309 enum dma_data_direction dir)
310 {
311 struct dma_map_ops *ops = get_dma_ops(dev);

313 BUG_ON(!valid_dma_direction(dir));
314 if (ops->sync_single_for_cpu)
315 ops->sync_single_for_cpu(dev, addr, size, dir);
316 debug_dma_sync_single_for_cpu(dev, addr, size, dir);
317 }

319 static inline void dma_sync_single_for_device(struct device *dev,
320 dma_addr_t addr, size_t size,
321 enum dma_data_direction dir)
322 {
323 struct dma_map_ops *ops = get_dma_ops(dev);

325 BUG_ON(!valid_dma_direction(dir));
326 if (ops->sync_single_for_device)
327 ops->sync_single_for_device(dev, addr, size, dir);
328 debug_dma_sync_single_for_device(dev, addr, size, dir);
329 }

331 static inline void dma_sync_single_range_for_cpu(struct device *dev,
332 dma_addr_t addr,
333 unsigned long offset,
334 size_t size,
335 enum dma_data_direction dir)
336 {
337 const struct dma_map_ops *ops = get_dma_ops(dev);

339 BUG_ON(!valid_dma_direction(dir));
340 if (ops->sync_single_for_cpu)
341 ops->sync_single_for_cpu(dev, addr + offset, size, dir);
342 debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
343 }

345 static inline void dma_sync_single_range_for_device(struct device *dev,
346 dma_addr_t addr,
347 unsigned long offset,
348 size_t size,
349 enum dma_data_direction dir)
350 {
351 const struct dma_map_ops *ops = get_dma_ops(dev);

353 BUG_ON(!valid_dma_direction(dir));
354 if (ops->sync_single_for_device)
355 ops->sync_single_for_device(dev, addr + offset, size, dir);
356 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
357 }

359 static inline void
360 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
361 int nelems, enum dma_data_direction dir)
362 {
363 struct dma_map_ops *ops = get_dma_ops(dev);

365 BUG_ON(!valid_dma_direction(dir));
366 if (ops->sync_sg_for_cpu)
367 ops->sync_sg_for_cpu(dev, sg, nelems, dir);
368 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
369 }

371 static inline void
372 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
373 int nelems, enum dma_data_direction dir)
374 {
375 struct dma_map_ops *ops = get_dma_ops(dev);

377 BUG_ON(!valid_dma_direction(dir));
378 if (ops->sync_sg_for_device)
379 ops->sync_sg_for_device(dev, sg, nelems, dir);
380 debug_dma_sync_sg_for_device(dev, sg, nelems, dir);

382 }

384 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
385 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
386 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
387 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)

389 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
390 void *cpu_addr, dma_addr_t dma_addr, size_t size);
392 void *dma_common_contiguous_remap(struct page *page, size_t size,
393 unsigned long vm_flags,
394 pgprot_t prot, const void *caller);

396 void *dma_common_pages_remap(struct page **pages, size_t size,
397 unsigned long vm_flags, pgprot_t prot,
398 const void *caller);
399 void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

401 /**
402 * dma_mmap_attrs - map a coherent DMA allocation into user space
403 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
404 * @vma: vm_area_struct describing requested user mapping
405 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
406 * @handle: device-view address returned from dma_alloc_attrs
407 * @size: size of memory originally requested in dma_alloc_attrs
408 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
409 *
410 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
411 * into user space. The coherent DMA buffer must not be freed by the
412 * driver until the user space mapping has been released.
413 */
414 static inline int
415 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
416 dma_addr_t dma_addr, size_t size, unsigned long attrs)
417 {
418 struct dma_map_ops *ops = get_dma_ops(dev);
419 BUG_ON(!ops);
420 if (ops->mmap)
421 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
422 return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
423 }

425 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

427 int
428 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
429 void *cpu_addr, dma_addr_t dma_addr, size_t size);

431 static inline int
432 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
433 dma_addr_t dma_addr, size_t size,
434 unsigned long attrs)
435 {
436 struct dma_map_ops *ops = get_dma_ops(dev);
437 BUG_ON(!ops);
438 if (ops->get_sgtable)
439 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
440 attrs);
441 return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
442 }

444 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

446 #ifndef arch_dma_alloc_attrs
447 #define arch_dma_alloc_attrs(dev, flag) (true)
448 #endif

450 static inline void *dma_alloc_attrs(struct device *dev, size_t size,
451 dma_addr_t *dma_handle, gfp_t flag,
452 unsigned long attrs)
453 {
454 struct dma_map_ops *ops = get_dma_ops(dev);
455 void *cpu_addr;

457 BUG_ON(!ops);

459 if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
460 return cpu_addr;

462 if (!arch_dma_alloc_attrs(&dev, &flag))
463 return NULL;
464 if (!ops->alloc)
465 return NULL;

467 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
468 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
469 return cpu_addr;
470 }

472 static inline void dma_free_attrs(struct device *dev, size_t size,
473 void *cpu_addr, dma_addr_t dma_handle,
474 unsigned long attrs)
475 {
476 struct dma_map_ops *ops = get_dma_ops(dev);

478 BUG_ON(!ops);
479 WARN_ON(irqs_disabled());

481 if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
482 return;

484 if (!ops->free || !cpu_addr)
485 return;

487 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
488 ops->free(dev, size, cpu_addr, dma_handle, attrs);
489 }
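/*
 * Editorial sketch: a long-lived, zero-initialized descriptor ring through
 * the coherent API; the dma_alloc_coherent()/dma_free_coherent() wrappers
 * defined just below are simply dma_alloc_attrs()/dma_free_attrs() with
 * attrs == 0. foo_ring_alloc() is hypothetical.
 */
static void *foo_ring_alloc(struct device *dev, size_t bytes,
                            dma_addr_t *ring_dma)
{
    return dma_alloc_attrs(dev, bytes, ring_dma,
                           GFP_KERNEL | __GFP_ZERO, 0);
}
/* teardown: dma_free_attrs(dev, bytes, ring, ring_dma, 0); */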
dma_handle) 499 { 500 return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0); 501 } 502 503 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, 504 dma_addr_t *dma_handle, gfp_t gfp) 505 { 506 return dma_alloc_attrs(dev, size, dma_handle, gfp, 507 DMA_ATTR_NON_CONSISTENT); 508 } 509 510 static inline void dma_free_noncoherent(struct device *dev, size_t size, 511 void *cpu_addr, dma_addr_t dma_handle) 512 { 513 dma_free_attrs(dev, size, cpu_addr, dma_handle, 514 DMA_ATTR_NON_CONSISTENT); 515 } 516 517 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 518 { 519 debug_dma_mapping_error(dev, dma_addr); 520 521 if (get_dma_ops(dev)->mapping_error) 522 return get_dma_ops(dev)->mapping_error(dev, dma_addr); 523 524 #ifdef DMA_ERROR_CODE 525 return dma_addr == DMA_ERROR_CODE; 526 #else 527 return 0; 528 #endif 529 } 530 531 #ifndef HAVE_ARCH_DMA_SUPPORTED 532 static inline int dma_supported(struct device *dev, u64 mask) 533 { 534 struct dma_map_ops *ops = get_dma_ops(dev); 535 536 if (!ops) 537 return 0; 538 if (!ops->dma_supported) 539 return 1; 540 return ops->dma_supported(dev, mask); 541 } 542 #endif 543 544 #ifndef HAVE_ARCH_DMA_SET_MASK 545 static inline int dma_set_mask(struct device *dev, u64 mask) 546 { 547 struct dma_map_ops *ops = get_dma_ops(dev); 548 549 if (ops->set_dma_mask) 550 return ops->set_dma_mask(dev, mask); 551 552 if (!dev->dma_mask || !dma_supported(dev, mask)) 553 return -EIO; 554 *dev->dma_mask = mask; 555 return 0; 556 } 557 #endif 558 559 static inline u64 dma_get_mask(struct device *dev) 560 { 561 if (dev && dev->dma_mask && *dev->dma_mask) 562 return *dev->dma_mask; 563 return DMA_BIT_MASK(32); 564 } 565 566 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK 567 int dma_set_coherent_mask(struct device *dev, u64 mask); 568 #else 569 static inline int dma_set_coherent_mask(struct device *dev, u64 mask) 570 { 571 if (!dma_supported(dev, mask)) 572 return -EIO; 573 dev->coherent_dma_mask = mask; 574 return 0; 575 } 576 #endif 577 578 /* 579 * Set both the DMA mask and the coherent DMA mask to the same thing. 580 * Note that we don't check the return value from dma_set_coherent_mask() 581 * as the DMA API guarantees that the coherent DMA mask can be set to 582 * the same or smaller than the streaming DMA mask. 583 */ 584 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) 585 { 586 int rc = dma_set_mask(dev, mask); 587 if (rc == 0) 588 dma_set_coherent_mask(dev, mask); 589 return rc; 590 } 591 592 /* 593 * Similar to the above, except it deals with the case where the device 594 * does not have dev->dma_mask appropriately setup. 
595 */ 596 static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) 597 { 598 dev->dma_mask = &dev->coherent_dma_mask; 599 return dma_set_mask_and_coherent(dev, mask); 600 } 601 602 extern u64 dma_get_required_mask(struct device *dev); 603 604 #ifndef arch_setup_dma_ops 605 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, 606 u64 size, const struct iommu_ops *iommu, 607 bool coherent) { } 608 #endif 609 610 #ifndef arch_teardown_dma_ops 611 static inline void arch_teardown_dma_ops(struct device *dev) { } 612 #endif 613 614 static inline unsigned int dma_get_max_seg_size(struct device *dev) 615 { 616 if (dev->dma_parms && dev->dma_parms->max_segment_size) 617 return dev->dma_parms->max_segment_size; 618 return SZ_64K; 619 } 620 621 static inline unsigned int dma_set_max_seg_size(struct device *dev, 622 unsigned int size) 623 { 624 if (dev->dma_parms) { 625 dev->dma_parms->max_segment_size = size; 626 return 0; 627 } 628 return -EIO; 629 } 630 631 static inline unsigned long dma_get_seg_boundary(struct device *dev) 632 { 633 if (dev->dma_parms && dev->dma_parms->segment_boundary_mask) 634 return dev->dma_parms->segment_boundary_mask; 635 return DMA_BIT_MASK(32); 636 } 637 638 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) 639 { 640 if (dev->dma_parms) { 641 dev->dma_parms->segment_boundary_mask = mask; 642 return 0; 643 } 644 return -EIO; 645 } 646 647 #ifndef dma_max_pfn 648 static inline unsigned long dma_max_pfn(struct device *dev) 649 { 650 return *dev->dma_mask >> PAGE_SHIFT; 651 } 652 #endif 653 654 static inline void *dma_zalloc_coherent(struct device *dev, size_t size, 655 dma_addr_t *dma_handle, gfp_t flag) 656 { 657 void *ret = dma_alloc_coherent(dev, size, dma_handle, 658 flag | __GFP_ZERO); 659 return ret; 660 } 661 662 #ifdef CONFIG_HAS_DMA 663 static inline int dma_get_cache_alignment(void) 664 { 665 #ifdef ARCH_DMA_MINALIGN 666 return ARCH_DMA_MINALIGN; 667 #endif 668 return 1; 669 } 670 #endif 671 672 /* flags for the coherent memory api */ 673 #define DMA_MEMORY_MAP 0x01 674 #define DMA_MEMORY_IO 0x02 675 #define DMA_MEMORY_INCLUDES_CHILDREN 0x04 676 #define DMA_MEMORY_EXCLUSIVE 0x08 677 678 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT 679 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, 680 dma_addr_t device_addr, size_t size, int flags); 681 void dma_release_declared_memory(struct device *dev); 682 void *dma_mark_declared_memory_occupied(struct device *dev, 683 dma_addr_t device_addr, size_t size); 684 #else 685 static inline int 686 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, 687 dma_addr_t device_addr, size_t size, int flags) 688 { 689 return 0; 690 } 691 692 static inline void 693 dma_release_declared_memory(struct device *dev) 694 { 695 } 696 697 static inline void * 698 dma_mark_declared_memory_occupied(struct device *dev, 699 dma_addr_t device_addr, size_t size) 700 { 701 return ERR_PTR(-EBUSY); 702 } 703 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ 704 705 /* 706 * Managed DMA API 707 */ 708 extern void *dmam_alloc_coherent(struct device *dev, size_t size, 709 dma_addr_t *dma_handle, gfp_t gfp); 710 extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, 711 dma_addr_t dma_handle); 712 extern void *dmam_alloc_noncoherent(struct device *dev, size_t size, 713 dma_addr_t *dma_handle, gfp_t gfp); 714 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr, 715 dma_addr_t dma_handle); 716 #ifdef 
CONFIG_HAVE_GENERIC_DMA_COHERENT 717 extern int dmam_declare_coherent_memory(struct device *dev, 718 phys_addr_t phys_addr, 719 dma_addr_t device_addr, size_t size, 720 int flags); 721 extern void dmam_release_declared_memory(struct device *dev); 722 #else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ 723 static inline int dmam_declare_coherent_memory(struct device *dev, 724 phys_addr_t phys_addr, dma_addr_t device_addr, 725 size_t size, gfp_t gfp) 726 { 727 return 0; 728 } 729 730 static inline void dmam_release_declared_memory(struct device *dev) 731 { 732 } 733 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ 734 735 static inline void *dma_alloc_wc(struct device *dev, size_t size, 736 dma_addr_t *dma_addr, gfp_t gfp) 737 { 738 return dma_alloc_attrs(dev, size, dma_addr, gfp, 739 DMA_ATTR_WRITE_COMBINE); 740 } 741 #ifndef dma_alloc_writecombine 742 #define dma_alloc_writecombine dma_alloc_wc 743 #endif 744 745 static inline void dma_free_wc(struct device *dev, size_t size, 746 void *cpu_addr, dma_addr_t dma_addr) 747 { 748 return dma_free_attrs(dev, size, cpu_addr, dma_addr, 749 DMA_ATTR_WRITE_COMBINE); 750 } 751 #ifndef dma_free_writecombine 752 #define dma_free_writecombine dma_free_wc 753 #endif 754 755 static inline int dma_mmap_wc(struct device *dev, 756 struct vm_area_struct *vma, 757 void *cpu_addr, dma_addr_t dma_addr, 758 size_t size) 759 { 760 return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, 761 DMA_ATTR_WRITE_COMBINE); 762 } 763 #ifndef dma_mmap_writecombine 764 #define dma_mmap_writecombine dma_mmap_wc 765 #endif 766 767 #if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG) 768 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME 769 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME 770 #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) 771 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) 772 #define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) 773 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) 774 #else 775 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) 776 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) 777 #define dma_unmap_addr(PTR, ADDR_NAME) (0) 778 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) 779 #define dma_unmap_len(PTR, LEN_NAME) (0) 780 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) 781 #endif 782 783 #endif
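
The header above defines the coherent-allocation entry points that drivers typically call (dma_set_mask_and_coherent(), dma_alloc_coherent(), dma_free_coherent()). The following sketch shows how they fit together; it is a hypothetical example for orientation while reading the trace, not code from the driver under analysis, and the buffer size and names are illustrative.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical probe-time helper; "buf", "buf_dma" and BUF_SIZE are
 * illustrative names, not taken from the driver under analysis. */
static int example_dma_setup(struct device *dev)
{
        void *buf;
        dma_addr_t buf_dma;
        const size_t BUF_SIZE = 4096;

        /*
         * Set both masks; per the comment in the header, the coherent
         * mask is guaranteed to be settable to the same or a smaller
         * value than the streaming mask, so its result is not checked.
         */
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
                return -EIO;

        /* dma_alloc_coherent() is dma_alloc_attrs() with attrs == 0. */
        buf = dma_alloc_coherent(dev, BUF_SIZE, &buf_dma, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* ... program buf_dma into the device, do the I/O ... */

        dma_free_coherent(dev, BUF_SIZE, buf, buf_dma);
        return 0;
}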
1 /* 2 * ioport.h Definitions of routines for detecting, reserving and 3 * allocating system resources. 4 * 5 * Authors: Linus Torvalds 6 */ 7 8 #ifndef _LINUX_IOPORT_H 9 #define _LINUX_IOPORT_H 10 11 #ifndef __ASSEMBLY__ 12 #include <linux/compiler.h> 13 #include <linux/types.h> 14 /* 15 * Resources are tree-like, allowing 16 * nesting etc.. 17 */ 18 struct resource { 19 resource_size_t start; 20 resource_size_t end; 21 const char *name; 22 unsigned long flags; 23 unsigned long desc; 24 struct resource *parent, *sibling, *child; 25 }; 26 27 /* 28 * IO resources have these defined flags. 29 * 30 * PCI devices expose these flags to userspace in the "resource" sysfs file, 31 * so don't move them. 32 */ 33 #define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ 34 35 #define IORESOURCE_TYPE_BITS 0x00001f00 /* Resource type */ 36 #define IORESOURCE_IO 0x00000100 /* PCI/ISA I/O ports */ 37 #define IORESOURCE_MEM 0x00000200 38 #define IORESOURCE_REG 0x00000300 /* Register offsets */ 39 #define IORESOURCE_IRQ 0x00000400 40 #define IORESOURCE_DMA 0x00000800 41 #define IORESOURCE_BUS 0x00001000 42 43 #define IORESOURCE_PREFETCH 0x00002000 /* No side effects */ 44 #define IORESOURCE_READONLY 0x00004000 45 #define IORESOURCE_CACHEABLE 0x00008000 46 #define IORESOURCE_RANGELENGTH 0x00010000 47 #define IORESOURCE_SHADOWABLE 0x00020000 48 49 #define IORESOURCE_SIZEALIGN 0x00040000 /* size indicates alignment */ 50 #define IORESOURCE_STARTALIGN 0x00080000 /* start field is alignment */ 51 52 #define IORESOURCE_MEM_64 0x00100000 53 #define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */ 54 #define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */ 55 56 #define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */ 57 #define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */ 58 59 #define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ 60 61 #define IORESOURCE_DISABLED 0x10000000 62 #define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */ 63 #define IORESOURCE_AUTO 0x40000000 64 #define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ 65 66 /* I/O resource extended types */ 67 #define IORESOURCE_SYSTEM_RAM (IORESOURCE_MEM|IORESOURCE_SYSRAM) 68 69 /* PnP IRQ specific bits (IORESOURCE_BITS) */ 70 #define IORESOURCE_IRQ_HIGHEDGE (1<<0) 71 #define IORESOURCE_IRQ_LOWEDGE (1<<1) 72 #define IORESOURCE_IRQ_HIGHLEVEL (1<<2) 73 #define IORESOURCE_IRQ_LOWLEVEL (1<<3) 74 #define IORESOURCE_IRQ_SHAREABLE (1<<4) 75 #define IORESOURCE_IRQ_OPTIONAL (1<<5) 76 77 /* PnP DMA specific bits (IORESOURCE_BITS) */ 78 #define IORESOURCE_DMA_TYPE_MASK (3<<0) 79 #define IORESOURCE_DMA_8BIT (0<<0) 80 #define IORESOURCE_DMA_8AND16BIT (1<<0) 81 #define IORESOURCE_DMA_16BIT (2<<0) 82 83 #define IORESOURCE_DMA_MASTER (1<<2) 84 #define IORESOURCE_DMA_BYTE (1<<3) 85 #define IORESOURCE_DMA_WORD (1<<4) 86 87 #define IORESOURCE_DMA_SPEED_MASK (3<<6) 88 #define IORESOURCE_DMA_COMPATIBLE (0<<6) 89 #define IORESOURCE_DMA_TYPEA (1<<6) 90 #define IORESOURCE_DMA_TYPEB (2<<6) 91 #define IORESOURCE_DMA_TYPEF (3<<6) 92 93 /* PnP memory I/O specific bits (IORESOURCE_BITS) */ 94 #define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */ 95 #define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */ 96 #define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */ 97 #define IORESOURCE_MEM_TYPE_MASK (3<<3) 98 #define IORESOURCE_MEM_8BIT (0<<3) 99 #define IORESOURCE_MEM_16BIT (1<<3) 100 #define IORESOURCE_MEM_8AND16BIT (2<<3) 
101 #define IORESOURCE_MEM_32BIT (3<<3) 102 #define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ 103 #define IORESOURCE_MEM_EXPANSIONROM (1<<6) 104 105 /* PnP I/O specific bits (IORESOURCE_BITS) */ 106 #define IORESOURCE_IO_16BIT_ADDR (1<<0) 107 #define IORESOURCE_IO_FIXED (1<<1) 108 #define IORESOURCE_IO_SPARSE (1<<2) 109 110 /* PCI ROM control bits (IORESOURCE_BITS) */ 111 #define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ 112 #define IORESOURCE_ROM_SHADOW (1<<1) /* Use RAM image, not ROM BAR */ 113 114 /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ 115 #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ 116 #define IORESOURCE_PCI_EA_BEI (1<<5) /* BAR Equivalent Indicator */ 117 118 /* 119 * I/O Resource Descriptors 120 * 121 * Descriptors are used by walk_iomem_res_desc() and region_intersects() 122 * for searching a specific resource range in the iomem table. Assign 123 * a new descriptor when a resource range supports the search interfaces. 124 * Otherwise, resource.desc must be set to IORES_DESC_NONE (0). 125 */ 126 enum { 127 IORES_DESC_NONE = 0, 128 IORES_DESC_CRASH_KERNEL = 1, 129 IORES_DESC_ACPI_TABLES = 2, 130 IORES_DESC_ACPI_NV_STORAGE = 3, 131 IORES_DESC_PERSISTENT_MEMORY = 4, 132 IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, 133 }; 134 135 /* helpers to define resources */ 136 #define DEFINE_RES_NAMED(_start, _size, _name, _flags) \ 137 { \ 138 .start = (_start), \ 139 .end = (_start) + (_size) - 1, \ 140 .name = (_name), \ 141 .flags = (_flags), \ 142 .desc = IORES_DESC_NONE, \ 143 } 144 145 #define DEFINE_RES_IO_NAMED(_start, _size, _name) \ 146 DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO) 147 #define DEFINE_RES_IO(_start, _size) \ 148 DEFINE_RES_IO_NAMED((_start), (_size), NULL) 149 150 #define DEFINE_RES_MEM_NAMED(_start, _size, _name) \ 151 DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_MEM) 152 #define DEFINE_RES_MEM(_start, _size) \ 153 DEFINE_RES_MEM_NAMED((_start), (_size), NULL) 154 155 #define DEFINE_RES_IRQ_NAMED(_irq, _name) \ 156 DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ) 157 #define DEFINE_RES_IRQ(_irq) \ 158 DEFINE_RES_IRQ_NAMED((_irq), NULL) 159 160 #define DEFINE_RES_DMA_NAMED(_dma, _name) \ 161 DEFINE_RES_NAMED((_dma), 1, (_name), IORESOURCE_DMA) 162 #define DEFINE_RES_DMA(_dma) \ 163 DEFINE_RES_DMA_NAMED((_dma), NULL) 164 165 /* PC/ISA/whatever - the normal PC address spaces: IO and memory */ 166 extern struct resource ioport_resource; 167 extern struct resource iomem_resource; 168 169 extern struct resource *request_resource_conflict(struct resource *root, struct resource *new); 170 extern int request_resource(struct resource *root, struct resource *new); 171 extern int release_resource(struct resource *new); 172 void release_child_resources(struct resource *new); 173 extern void reserve_region_with_split(struct resource *root, 174 resource_size_t start, resource_size_t end, 175 const char *name); 176 extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new); 177 extern int insert_resource(struct resource *parent, struct resource *new); 178 extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); 179 extern int remove_resource(struct resource *old); 180 extern void arch_remove_reservations(struct resource *avail); 181 extern int allocate_resource(struct resource *root, struct resource *new, 182 resource_size_t size, resource_size_t min, 183 resource_size_t max, 
resource_size_t align, 184 resource_size_t (*alignf)(void *, 185 const struct resource *, 186 resource_size_t, 187 resource_size_t), 188 void *alignf_data); 189 struct resource *lookup_resource(struct resource *root, resource_size_t start); 190 int adjust_resource(struct resource *res, resource_size_t start, 191 resource_size_t size); 192 resource_size_t resource_alignment(struct resource *res); 193 static inline resource_size_t resource_size(const struct resource *res) 194 { 195 return res->end - res->start + 1; 196 } 197 static inline unsigned long resource_type(const struct resource *res) 198 { 199 return res->flags & IORESOURCE_TYPE_BITS; 200 } 201 static inline unsigned long resource_ext_type(const struct resource *res) 202 { 203 return res->flags & IORESOURCE_EXT_TYPE_BITS; 204 } 205 /* True iff r1 completely contains r2 */ 206 static inline bool resource_contains(struct resource *r1, struct resource *r2) 207 { 208 if (resource_type(r1) != resource_type(r2)) 209 return false; 210 if (r1->flags & IORESOURCE_UNSET || r2->flags & IORESOURCE_UNSET) 211 return false; 212 return r1->start <= r2->start && r1->end >= r2->end; 213 } 214 215 216 /* Convenience shorthand with allocation */ 217 #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0) 218 #define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED) 219 #define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl) 220 #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0) 221 #define request_mem_region_exclusive(start,n,name) \ 222 __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE) 223 #define rename_region(region, newname) do { (region)->name = (newname); } while (0) 224 225 extern struct resource * __request_region(struct resource *, 226 resource_size_t start, 227 resource_size_t n, 228 const char *name, int flags); 229 230 /* Compatibility cruft */ 231 #define release_region(start,n) __release_region(&ioport_resource, (start), (n)) 232 #define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n)) 233 234 extern void __release_region(struct resource *, resource_size_t, 235 resource_size_t); 236 #ifdef CONFIG_MEMORY_HOTREMOVE 237 extern int release_mem_region_adjustable(struct resource *, resource_size_t, 238 resource_size_t); 239 #endif 240 241 /* Wrappers for managed devices */ 242 struct device; 243 244 extern int devm_request_resource(struct device *dev, struct resource *root, 245 struct resource *new); 246 extern void devm_release_resource(struct device *dev, struct resource *new); 247 248 #define devm_request_region(dev,start,n,name) \ 249 __devm_request_region(dev, &ioport_resource, (start), (n), (name)) 250 #define devm_request_mem_region(dev,start,n,name) \ 251 __devm_request_region(dev, &iomem_resource, (start), (n), (name)) 252 253 extern struct resource * __devm_request_region(struct device *dev, 254 struct resource *parent, resource_size_t start, 255 resource_size_t n, const char *name); 256 257 #define devm_release_region(dev, start, n) \ 258 __devm_release_region(dev, &ioport_resource, (start), (n)) 259 #define devm_release_mem_region(dev, start, n) \ 260 __devm_release_region(dev, &iomem_resource, (start), (n)) 261 262 extern void __devm_release_region(struct device *dev, struct resource *parent, 263 resource_size_t start, resource_size_t n); 264 extern int 
iomem_map_sanity_check(resource_size_t addr, unsigned long size); 265 extern int iomem_is_exclusive(u64 addr); 266 267 extern int 268 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, 269 void *arg, int (*func)(unsigned long, unsigned long, void *)); 270 extern int 271 walk_system_ram_res(u64 start, u64 end, void *arg, 272 int (*func)(u64, u64, void *)); 273 extern int 274 walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end, 275 void *arg, int (*func)(u64, u64, void *)); 276 277 /* True if any part of r1 overlaps r2 */ 278 static inline bool resource_overlaps(struct resource *r1, struct resource *r2) 279 { 280 return (r1->start <= r2->end && r1->end >= r2->start); 281 } 282 283 284 #endif /* __ASSEMBLY__ */ 285 #endif /* _LINUX_IOPORT_H */
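
Since ioport.h above supplies the resource-reservation API (request_mem_region(), release_mem_region(), resource_size()), here is a minimal sketch of claiming an MMIO window. The base address, length and region name are placeholders, not values from a real device.

#include <linux/ioport.h>
#include <linux/printk.h>
#include <linux/errno.h>

/* Placeholder MMIO window; 0xfe000000/0x1000 and "example-dev" are
 * invented for illustration. */
static int example_claim_region(void)
{
        struct resource *res;

        res = request_mem_region(0xfe000000, 0x1000, "example-dev");
        if (!res)
                return -EBUSY;  /* range already claimed by someone else */

        /* resource_size() == end - start + 1, as defined above. */
        pr_info("claimed %pR (%llu bytes)\n", res,
                (unsigned long long)resource_size(res));

        release_mem_region(0xfe000000, 0x1000);
        return 0;
}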
1 /* 2 * platform_device.h - generic, centralized driver model 3 * 4 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> 5 * 6 * This file is released under the GPLv2 7 * 8 * See Documentation/driver-model/ for more information. 9 */ 10 11 #ifndef _PLATFORM_DEVICE_H_ 12 #define _PLATFORM_DEVICE_H_ 13 14 #include <linux/device.h> 15 #include <linux/mod_devicetable.h> 16 17 #define PLATFORM_DEVID_NONE (-1) 18 #define PLATFORM_DEVID_AUTO (-2) 19 20 struct mfd_cell; 21 struct property_entry; 22 23 struct platform_device { 24 const char *name; 25 int id; 26 bool id_auto; 27 struct device dev; 28 u32 num_resources; 29 struct resource *resource; 30 31 const struct platform_device_id *id_entry; 32 char *driver_override; /* Driver name to force a match */ 33 34 /* MFD cell pointer */ 35 struct mfd_cell *mfd_cell; 36 37 /* arch specific additions */ 38 struct pdev_archdata archdata; 39 }; 40 41 #define platform_get_device_id(pdev) ((pdev)->id_entry) 42 43 #define to_platform_device(x) container_of((x), struct platform_device, dev) 44 45 extern int platform_device_register(struct platform_device *); 46 extern void platform_device_unregister(struct platform_device *); 47 48 extern struct bus_type platform_bus_type; 49 extern struct device platform_bus; 50 51 extern void arch_setup_pdev_archdata(struct platform_device *); 52 extern struct resource *platform_get_resource(struct platform_device *, 53 unsigned int, unsigned int); 54 extern int platform_get_irq(struct platform_device *, unsigned int); 55 extern int platform_irq_count(struct platform_device *); 56 extern struct resource *platform_get_resource_byname(struct platform_device *, 57 unsigned int, 58 const char *); 59 extern int platform_get_irq_byname(struct platform_device *, const char *); 60 extern int platform_add_devices(struct platform_device **, int); 61 62 struct platform_device_info { 63 struct device *parent; 64 struct fwnode_handle *fwnode; 65 66 const char *name; 67 int id; 68 69 const struct resource *res; 70 unsigned int num_res; 71 72 const void *data; 73 size_t size_data; 74 u64 dma_mask; 75 76 struct property_entry *properties; 77 }; 78 extern struct platform_device *platform_device_register_full( 79 const struct platform_device_info *pdevinfo); 80 81 /** 82 * platform_device_register_resndata - add a platform-level device with 83 * resources and platform-specific data 84 * 85 * @parent: parent device for the device we're adding 86 * @name: base name of the device we're adding 87 * @id: instance id 88 * @res: set of resources that needs to be allocated for the device 89 * @num: number of resources 90 * @data: platform specific data for this platform device 91 * @size: size of platform specific data 92 * 93 * Returns &struct platform_device pointer on success, or ERR_PTR() on error. 
94 */ 95 static inline struct platform_device *platform_device_register_resndata( 96 struct device *parent, const char *name, int id, 97 const struct resource *res, unsigned int num, 98 const void *data, size_t size) { 99 100 struct platform_device_info pdevinfo = { 101 .parent = parent, 102 .name = name, 103 .id = id, 104 .res = res, 105 .num_res = num, 106 .data = data, 107 .size_data = size, 108 .dma_mask = 0, 109 }; 110 111 return platform_device_register_full(&pdevinfo); 112 } 113 114 /** 115 * platform_device_register_simple - add a platform-level device and its resources 116 * @name: base name of the device we're adding 117 * @id: instance id 118 * @res: set of resources that needs to be allocated for the device 119 * @num: number of resources 120 * 121 * This function creates a simple platform device that requires minimal 122 * resource and memory management. Canned release function freeing memory 123 * allocated for the device allows drivers using such devices to be 124 * unloaded without waiting for the last reference to the device to be 125 * dropped. 126 * 127 * This interface is primarily intended for use with legacy drivers which 128 * probe hardware directly. Because such drivers create sysfs device nodes 129 * themselves, rather than letting system infrastructure handle such device 130 * enumeration tasks, they don't fully conform to the Linux driver model. 131 * In particular, when such drivers are built as modules, they can't be 132 * "hotplugged". 133 * 134 * Returns &struct platform_device pointer on success, or ERR_PTR() on error. 135 */ 136 static inline struct platform_device *platform_device_register_simple( 137 const char *name, int id, 138 const struct resource *res, unsigned int num) 139 { 140 return platform_device_register_resndata(NULL, name, id, 141 res, num, NULL, 0); 142 } 143 144 /** 145 * platform_device_register_data - add a platform-level device with platform-specific data 146 * @parent: parent device for the device we're adding 147 * @name: base name of the device we're adding 148 * @id: instance id 149 * @data: platform specific data for this platform device 150 * @size: size of platform specific data 151 * 152 * This function creates a simple platform device that requires minimal 153 * resource and memory management. Canned release function freeing memory 154 * allocated for the device allows drivers using such devices to be 155 * unloaded without waiting for the last reference to the device to be 156 * dropped. 157 * 158 * Returns &struct platform_device pointer on success, or ERR_PTR() on error. 
159 */ 160 static inline struct platform_device *platform_device_register_data( 161 struct device *parent, const char *name, int id, 162 const void *data, size_t size) 163 { 164 return platform_device_register_resndata(parent, name, id, 165 NULL, 0, data, size); 166 } 167 168 extern struct platform_device *platform_device_alloc(const char *name, int id); 169 extern int platform_device_add_resources(struct platform_device *pdev, 170 const struct resource *res, 171 unsigned int num); 172 extern int platform_device_add_data(struct platform_device *pdev, 173 const void *data, size_t size); 174 extern int platform_device_add_properties(struct platform_device *pdev, 175 struct property_entry *properties); 176 extern int platform_device_add(struct platform_device *pdev); 177 extern void platform_device_del(struct platform_device *pdev); 178 extern void platform_device_put(struct platform_device *pdev); 179 180 struct platform_driver { 181 int (*probe)(struct platform_device *); 182 int (*remove)(struct platform_device *); 183 void (*shutdown)(struct platform_device *); 184 int (*suspend)(struct platform_device *, pm_message_t state); 185 int (*resume)(struct platform_device *); 186 struct device_driver driver; 187 const struct platform_device_id *id_table; 188 bool prevent_deferred_probe; 189 }; 190 191 #define to_platform_driver(drv) (container_of((drv), struct platform_driver, \ 192 driver)) 193 194 /* 195 * use a macro to avoid include chaining to get THIS_MODULE 196 */ 197 #define platform_driver_register(drv) \ 198 __platform_driver_register(drv, THIS_MODULE) 199 extern int __platform_driver_register(struct platform_driver *, 200 struct module *); 201 extern void platform_driver_unregister(struct platform_driver *); 202 203 /* non-hotpluggable platform devices may use this so that probe() and 204 * its support may live in __init sections, conserving runtime memory. 205 */ 206 #define platform_driver_probe(drv, probe) \ 207 __platform_driver_probe(drv, probe, THIS_MODULE) 208 extern int __platform_driver_probe(struct platform_driver *driver, 209 int (*probe)(struct platform_device *), struct module *module); 210 211 static inline void *platform_get_drvdata(const struct platform_device *pdev) 212 { 213 return dev_get_drvdata(&pdev->dev); 214 } 215 216 static inline void platform_set_drvdata(struct platform_device *pdev, 217 void *data) 218 { 219 dev_set_drvdata(&pdev->dev, data); 220 } 221 222 /* module_platform_driver() - Helper macro for drivers that don't do 223 * anything special in module init/exit. This eliminates a lot of 224 * boilerplate. Each module may only use this macro once, and 225 * calling it replaces module_init() and module_exit() 226 */ 227 #define module_platform_driver(__platform_driver) \ 228 module_driver(__platform_driver, platform_driver_register, \ 229 platform_driver_unregister) 230 231 /* builtin_platform_driver() - Helper macro for builtin drivers that 232 * don't do anything special in driver init. This eliminates some 233 * boilerplate. Each driver may only use this macro once, and 234 * calling it replaces device_initcall(). Note this is meant to be 235 * a parallel of module_platform_driver() above, but w/o _exit stuff. 236 */ 237 #define builtin_platform_driver(__platform_driver) \ 238 builtin_driver(__platform_driver, platform_driver_register) 239 240 /* module_platform_driver_probe() - Helper macro for drivers that don't do 241 * anything special in module init/exit. This eliminates a lot of 242 * boilerplate. 
Each module may only use this macro once, and 243 * calling it replaces module_init() and module_exit() 244 */ 245 #define module_platform_driver_probe(__platform_driver, __platform_probe) \ 246 static int __init __platform_driver##_init(void) \ 247 { \ 248 return platform_driver_probe(&(__platform_driver), \ 249 __platform_probe); \ 250 } \ 251 module_init(__platform_driver##_init); \ 252 static void __exit __platform_driver##_exit(void) \ 253 { \ 254 platform_driver_unregister(&(__platform_driver)); \ 255 } \ 256 module_exit(__platform_driver##_exit); 257 258 /* builtin_platform_driver_probe() - Helper macro for drivers that don't do 259 * anything special in device init. This eliminates some boilerplate. Each 260 * driver may only use this macro once, and using it replaces device_initcall. 261 * This is meant to be a parallel of module_platform_driver_probe above, but 262 * without the __exit parts. 263 */ 264 #define builtin_platform_driver_probe(__platform_driver, __platform_probe) \ 265 static int __init __platform_driver##_init(void) \ 266 { \ 267 return platform_driver_probe(&(__platform_driver), \ 268 __platform_probe); \ 269 } \ 270 device_initcall(__platform_driver##_init); \ 271 272 #define platform_create_bundle(driver, probe, res, n_res, data, size) \ 273 __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE) 274 extern struct platform_device *__platform_create_bundle( 275 struct platform_driver *driver, int (*probe)(struct platform_device *), 276 struct resource *res, unsigned int n_res, 277 const void *data, size_t size, struct module *module); 278 279 int __platform_register_drivers(struct platform_driver * const *drivers, 280 unsigned int count, struct module *owner); 281 void platform_unregister_drivers(struct platform_driver * const *drivers, 282 unsigned int count); 283 284 #define platform_register_drivers(drivers, count) \ 285 __platform_register_drivers(drivers, count, THIS_MODULE) 286 287 /* early platform driver interface */ 288 struct early_platform_driver { 289 const char *class_str; 290 struct platform_driver *pdrv; 291 struct list_head list; 292 int requested_id; 293 char *buffer; 294 int bufsize; 295 }; 296 297 #define EARLY_PLATFORM_ID_UNSET -2 298 #define EARLY_PLATFORM_ID_ERROR -3 299 300 extern int early_platform_driver_register(struct early_platform_driver *epdrv, 301 char *buf); 302 extern void early_platform_add_devices(struct platform_device **devs, int num); 303 304 static inline int is_early_platform_device(struct platform_device *pdev) 305 { 306 return !pdev->dev.driver; 307 } 308 309 extern void early_platform_driver_register_all(char *class_str); 310 extern int early_platform_driver_probe(char *class_str, 311 int nr_probe, int user_only); 312 extern void early_platform_cleanup(void); 313 314 #define early_platform_init(class_string, platdrv) \ 315 early_platform_init_buffer(class_string, platdrv, NULL, 0) 316 317 #ifndef MODULE 318 #define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \ 319 static __initdata struct early_platform_driver early_driver = { \ 320 .class_str = class_string, \ 321 .buffer = buf, \ 322 .bufsize = bufsiz, \ 323 .pdrv = platdrv, \ 324 .requested_id = EARLY_PLATFORM_ID_UNSET, \ 325 }; \ 326 static int __init early_platform_driver_setup_func(char *buffer) \ 327 { \ 328 return early_platform_driver_register(&early_driver, buffer); \ 329 } \ 330 early_param(class_string, early_platform_driver_setup_func) 331 #else /* MODULE */ 332 #define early_platform_init_buffer(class_string, platdrv, 
buf, bufsiz) \ 333 static inline char *early_platform_driver_setup_func(void) \ 334 { \ 335 return bufsiz ? buf : NULL; \ 336 } 337 #endif /* MODULE */ 338 339 #ifdef CONFIG_SUSPEND 340 extern int platform_pm_suspend(struct device *dev); 341 extern int platform_pm_resume(struct device *dev); 342 #else 343 #define platform_pm_suspend NULL 344 #define platform_pm_resume NULL 345 #endif 346 347 #ifdef CONFIG_HIBERNATE_CALLBACKS 348 extern int platform_pm_freeze(struct device *dev); 349 extern int platform_pm_thaw(struct device *dev); 350 extern int platform_pm_poweroff(struct device *dev); 351 extern int platform_pm_restore(struct device *dev); 352 #else 353 #define platform_pm_freeze NULL 354 #define platform_pm_thaw NULL 355 #define platform_pm_poweroff NULL 356 #define platform_pm_restore NULL 357 #endif 358 359 #ifdef CONFIG_PM_SLEEP 360 #define USE_PLATFORM_PM_SLEEP_OPS \ 361 .suspend = platform_pm_suspend, \ 362 .resume = platform_pm_resume, \ 363 .freeze = platform_pm_freeze, \ 364 .thaw = platform_pm_thaw, \ 365 .poweroff = platform_pm_poweroff, \ 366 .restore = platform_pm_restore, 367 #else 368 #define USE_PLATFORM_PM_SLEEP_OPS 369 #endif 370 371 #endif /* _PLATFORM_DEVICE_H_ */
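
The module named in the verdict table below, uio_pruss, is a platform driver, so the registration machinery declared above is the relevant entry path. A minimal, hypothetical skeleton (driver name and callbacks invented for illustration) looks like this:

#include <linux/module.h>
#include <linux/platform_device.h>

/* Skeleton only; "example-dev" and the empty callbacks are hypothetical. */
static int example_probe(struct platform_device *pdev)
{
        struct resource *regs;

        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!regs)
                return -ENODEV;

        platform_set_drvdata(pdev, NULL);       /* stash per-device state */
        return 0;
}

static int example_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver example_driver = {
        .probe  = example_probe,
        .remove = example_remove,
        .driver = {
                .name = "example-dev",
        },
};
/* Expands to module_init()/module_exit() boilerplate, as described above. */
module_platform_driver(example_driver);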
1 /* 2 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk). 3 * 4 * (C) SGI 2006, Christoph Lameter 5 * Cleaned up and restructured to ease the addition of alternative 6 * implementations of SLAB allocators. 7 * (C) Linux Foundation 2008-2013 8 * Unified interface for all slab allocators 9 */ 10 11 #ifndef _LINUX_SLAB_H 12 #define _LINUX_SLAB_H 13 14 #include <linux/gfp.h> 15 #include <linux/types.h> 16 #include <linux/workqueue.h> 17 18 19 /* 20 * Flags to pass to kmem_cache_create(). 21 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set. 22 */ 23 #define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */ 24 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ 25 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ 26 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ 27 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */ 28 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ 29 #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */ 30 /* 31 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS! 32 * 33 * This delays freeing the SLAB page by a grace period, it does _NOT_ 34 * delay object freeing. This means that if you do kmem_cache_free() 35 * that memory location is free to be reused at any time. Thus it may 36 * be possible to see another object there in the same RCU grace period. 37 * 38 * This feature only ensures the memory location backing the object 39 * stays valid, the trick to using this is relying on an independent 40 * object validation pass. Something like: 41 * 42 * rcu_read_lock() 43 * again: 44 * obj = lockless_lookup(key); 45 * if (obj) { 46 * if (!try_get_ref(obj)) // might fail for free objects 47 * goto again; 48 * 49 * if (obj->key != key) { // not the object we expected 50 * put_ref(obj); 51 * goto again; 52 * } 53 * } 54 * rcu_read_unlock(); 55 * 56 * This is useful if we need to approach a kernel structure obliquely, 57 * from its address obtained without the usual locking. We can lock 58 * the structure to stabilize it and check it's still at the given address, 59 * only if we can be sure that the memory has not been meanwhile reused 60 * for some other kind of object (which our subsystem's lock might corrupt). 61 * 62 * rcu_read_lock before reading the address, then rcu_read_unlock after 63 * taking the spinlock within the structure expected at that address. 
64 */ 65 #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ 66 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ 67 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ 68 69 /* Flag to prevent checks on free */ 70 #ifdef CONFIG_DEBUG_OBJECTS 71 # define SLAB_DEBUG_OBJECTS 0x00400000UL 72 #else 73 # define SLAB_DEBUG_OBJECTS 0x00000000UL 74 #endif 75 76 #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ 77 78 /* Don't track use of uninitialized memory */ 79 #ifdef CONFIG_KMEMCHECK 80 # define SLAB_NOTRACK 0x01000000UL 81 #else 82 # define SLAB_NOTRACK 0x00000000UL 83 #endif 84 #ifdef CONFIG_FAILSLAB 85 # define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */ 86 #else 87 # define SLAB_FAILSLAB 0x00000000UL 88 #endif 89 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) 90 # define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */ 91 #else 92 # define SLAB_ACCOUNT 0x00000000UL 93 #endif 94 95 #ifdef CONFIG_KASAN 96 #define SLAB_KASAN 0x08000000UL 97 #else 98 #define SLAB_KASAN 0x00000000UL 99 #endif 100 101 /* The following flags affect the page allocator grouping pages by mobility */ 102 #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ 103 #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ 104 /* 105 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. 106 * 107 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault. 108 * 109 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. 110 * Both make kfree a no-op. 111 */ 112 #define ZERO_SIZE_PTR ((void *)16) 113 114 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ 115 (unsigned long)ZERO_SIZE_PTR) 116 117 #include <linux/kmemleak.h> 118 #include <linux/kasan.h> 119 120 struct mem_cgroup; 121 /* 122 * struct kmem_cache related prototypes 123 */ 124 void __init kmem_cache_init(void); 125 bool slab_is_available(void); 126 127 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, 128 unsigned long, 129 void (*)(void *)); 130 void kmem_cache_destroy(struct kmem_cache *); 131 int kmem_cache_shrink(struct kmem_cache *); 132 133 void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *); 134 void memcg_deactivate_kmem_caches(struct mem_cgroup *); 135 void memcg_destroy_kmem_caches(struct mem_cgroup *); 136 137 /* 138 * Please use this macro to create slab caches. Simply specify the 139 * name of the structure and maybe some flags that are listed above. 140 * 141 * The alignment of the struct determines object alignment. If you 142 * f.e. add ____cacheline_aligned_in_smp to the struct declaration 143 * then the objects will be properly aligned in SMP configurations. 
144 */ 145 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ 146 sizeof(struct __struct), __alignof__(struct __struct),\ 147 (__flags), NULL) 148 149 /* 150 * Common kmalloc functions provided by all allocators 151 */ 152 void * __must_check __krealloc(const void *, size_t, gfp_t); 153 void * __must_check krealloc(const void *, size_t, gfp_t); 154 void kfree(const void *); 155 void kzfree(const void *); 156 size_t ksize(const void *); 157 158 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR 159 const char *__check_heap_object(const void *ptr, unsigned long n, 160 struct page *page); 161 #else 162 static inline const char *__check_heap_object(const void *ptr, 163 unsigned long n, 164 struct page *page) 165 { 166 return NULL; 167 } 168 #endif 169 170 /* 171 * Some archs want to perform DMA into kmalloc caches and need a guaranteed 172 * alignment larger than the alignment of a 64-bit integer. 173 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that. 174 */ 175 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8 176 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN 177 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN 178 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN) 179 #else 180 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) 181 #endif 182 183 /* 184 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. 185 * Intended for arches that get misalignment faults even for 64 bit integer 186 * aligned buffers. 187 */ 188 #ifndef ARCH_SLAB_MINALIGN 189 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) 190 #endif 191 192 /* 193 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned 194 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN 195 * aligned pointers. 196 */ 197 #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN) 198 #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN) 199 #define __assume_page_alignment __assume_aligned(PAGE_SIZE) 200 201 /* 202 * Kmalloc array related definitions 203 */ 204 205 #ifdef CONFIG_SLAB 206 /* 207 * The largest kmalloc size supported by the SLAB allocators is 208 * 32 megabyte (2^25) or the maximum allocatable page order if that is 209 * less than 32 MB. 210 * 211 * WARNING: Its not easy to increase this value since the allocators have 212 * to do various tricks to work around compiler limitations in order to 213 * ensure proper constant folding. 214 */ 215 #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \ 216 (MAX_ORDER + PAGE_SHIFT - 1) : 25) 217 #define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH 218 #ifndef KMALLOC_SHIFT_LOW 219 #define KMALLOC_SHIFT_LOW 5 220 #endif 221 #endif 222 223 #ifdef CONFIG_SLUB 224 /* 225 * SLUB directly allocates requests fitting in to an order-1 page 226 * (PAGE_SIZE*2). Larger requests are passed to the page allocator. 227 */ 228 #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) 229 #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT) 230 #ifndef KMALLOC_SHIFT_LOW 231 #define KMALLOC_SHIFT_LOW 3 232 #endif 233 #endif 234 235 #ifdef CONFIG_SLOB 236 /* 237 * SLOB passes all requests larger than one page to the page allocator. 238 * No kmalloc array is necessary since objects of different sizes can 239 * be allocated from the same page. 
240 */ 241 #define KMALLOC_SHIFT_HIGH PAGE_SHIFT 242 #define KMALLOC_SHIFT_MAX 30 243 #ifndef KMALLOC_SHIFT_LOW 244 #define KMALLOC_SHIFT_LOW 3 245 #endif 246 #endif 247 248 /* Maximum allocatable size */ 249 #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX) 250 /* Maximum size for which we actually use a slab cache */ 251 #define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH) 252 /* Maximum order allocatable via the slab allocagtor */ 253 #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT) 254 255 /* 256 * Kmalloc subsystem. 257 */ 258 #ifndef KMALLOC_MIN_SIZE 259 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW) 260 #endif 261 262 /* 263 * This restriction comes from byte sized index implementation. 264 * Page size is normally 2^12 bytes and, in this case, if we want to use 265 * byte sized index which can represent 2^8 entries, the size of the object 266 * should be equal or greater to 2^12 / 2^8 = 2^4 = 16. 267 * If minimum size of kmalloc is less than 16, we use it as minimum object 268 * size and give up to use byte sized index. 269 */ 270 #define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \ 271 (KMALLOC_MIN_SIZE) : 16) 272 273 #ifndef CONFIG_SLOB 274 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; 275 #ifdef CONFIG_ZONE_DMA 276 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; 277 #endif 278 279 /* 280 * Figure out which kmalloc slab an allocation of a certain size 281 * belongs to. 282 * 0 = zero alloc 283 * 1 = 65 .. 96 bytes 284 * 2 = 129 .. 192 bytes 285 * n = 2^(n-1)+1 .. 2^n 286 */ 287 static __always_inline int kmalloc_index(size_t size) 288 { 289 if (!size) 290 return 0; 291 292 if (size <= KMALLOC_MIN_SIZE) 293 return KMALLOC_SHIFT_LOW; 294 295 if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96) 296 return 1; 297 if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192) 298 return 2; 299 if (size <= 8) return 3; 300 if (size <= 16) return 4; 301 if (size <= 32) return 5; 302 if (size <= 64) return 6; 303 if (size <= 128) return 7; 304 if (size <= 256) return 8; 305 if (size <= 512) return 9; 306 if (size <= 1024) return 10; 307 if (size <= 2 * 1024) return 11; 308 if (size <= 4 * 1024) return 12; 309 if (size <= 8 * 1024) return 13; 310 if (size <= 16 * 1024) return 14; 311 if (size <= 32 * 1024) return 15; 312 if (size <= 64 * 1024) return 16; 313 if (size <= 128 * 1024) return 17; 314 if (size <= 256 * 1024) return 18; 315 if (size <= 512 * 1024) return 19; 316 if (size <= 1024 * 1024) return 20; 317 if (size <= 2 * 1024 * 1024) return 21; 318 if (size <= 4 * 1024 * 1024) return 22; 319 if (size <= 8 * 1024 * 1024) return 23; 320 if (size <= 16 * 1024 * 1024) return 24; 321 if (size <= 32 * 1024 * 1024) return 25; 322 if (size <= 64 * 1024 * 1024) return 26; 323 BUG(); 324 325 /* Will never be reached. Needed because the compiler may complain */ 326 return -1; 327 } 328 #endif /* !CONFIG_SLOB */ 329 330 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc; 331 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc; 332 void kmem_cache_free(struct kmem_cache *, void *); 333 334 /* 335 * Bulk allocation and freeing operations. These are accelerated in an 336 * allocator specific way to avoid taking locks repeatedly or building 337 * metadata structures unnecessarily. 338 * 339 * Note that interrupts must be enabled when calling these functions. 
340 */ 341 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); 342 int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); 343 344 /* 345 * Caller must not use kfree_bulk() on memory not originally allocated 346 * by kmalloc(), because the SLOB allocator cannot handle this. 347 */ 348 static __always_inline void kfree_bulk(size_t size, void **p) 349 { 350 kmem_cache_free_bulk(NULL, size, p); 351 } 352 353 #ifdef CONFIG_NUMA 354 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc; 355 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc; 356 #else 357 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) 358 { 359 return __kmalloc(size, flags); 360 } 361 362 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) 363 { 364 return kmem_cache_alloc(s, flags); 365 } 366 #endif 367 368 #ifdef CONFIG_TRACING 369 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc; 370 371 #ifdef CONFIG_NUMA 372 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, 373 gfp_t gfpflags, 374 int node, size_t size) __assume_slab_alignment __malloc; 375 #else 376 static __always_inline void * 377 kmem_cache_alloc_node_trace(struct kmem_cache *s, 378 gfp_t gfpflags, 379 int node, size_t size) 380 { 381 return kmem_cache_alloc_trace(s, gfpflags, size); 382 } 383 #endif /* CONFIG_NUMA */ 384 385 #else /* CONFIG_TRACING */ 386 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s, 387 gfp_t flags, size_t size) 388 { 389 void *ret = kmem_cache_alloc(s, flags); 390 391 kasan_kmalloc(s, ret, size, flags); 392 return ret; 393 } 394 395 static __always_inline void * 396 kmem_cache_alloc_node_trace(struct kmem_cache *s, 397 gfp_t gfpflags, 398 int node, size_t size) 399 { 400 void *ret = kmem_cache_alloc_node(s, gfpflags, node); 401 402 kasan_kmalloc(s, ret, size, gfpflags); 403 return ret; 404 } 405 #endif /* CONFIG_TRACING */ 406 407 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; 408 409 #ifdef CONFIG_TRACING 410 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; 411 #else 412 static __always_inline void * 413 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) 414 { 415 return kmalloc_order(size, flags, order); 416 } 417 #endif 418 419 static __always_inline void *kmalloc_large(size_t size, gfp_t flags) 420 { 421 unsigned int order = get_order(size); 422 return kmalloc_order_trace(size, flags, order); 423 } 424 425 /** 426 * kmalloc - allocate memory 427 * @size: how many bytes of memory are required. 428 * @flags: the type of memory to allocate. 429 * 430 * kmalloc is the normal method of allocating memory 431 * for objects smaller than page size in the kernel. 432 * 433 * The @flags argument may be one of: 434 * 435 * %GFP_USER - Allocate memory on behalf of user. May sleep. 436 * 437 * %GFP_KERNEL - Allocate normal kernel ram. May sleep. 438 * 439 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools. 440 * For example, use this inside interrupt handlers. 441 * 442 * %GFP_HIGHUSER - Allocate pages from high memory. 443 * 444 * %GFP_NOIO - Do not do any I/O at all while trying to get memory. 445 * 446 * %GFP_NOFS - Do not make any fs calls while trying to get memory. 
447 * 448 * %GFP_NOWAIT - Allocation will not sleep. 449 * 450 * %__GFP_THISNODE - Allocate node-local memory only. 451 * 452 * %GFP_DMA - Allocation suitable for DMA. 453 * Should only be used for kmalloc() caches. Otherwise, use a 454 * slab created with SLAB_DMA. 455 * 456 * Also it is possible to set different flags by OR'ing 457 * in one or more of the following additional @flags: 458 * 459 * %__GFP_COLD - Request cache-cold pages instead of 460 * trying to return cache-warm pages. 461 * 462 * %__GFP_HIGH - This allocation has high priority and may use emergency pools. 463 * 464 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail 465 * (think twice before using). 466 * 467 * %__GFP_NORETRY - If memory is not immediately available, 468 * then give up at once. 469 * 470 * %__GFP_NOWARN - If allocation fails, don't issue any warnings. 471 * 472 * %__GFP_REPEAT - If allocation fails initially, try once more before failing. 473 * 474 * There are other flags available as well, but these are not intended 475 * for general use, and so are not documented here. For a full list of 476 * potential flags, always refer to linux/gfp.h. 477 */ 478 static __always_inline void *kmalloc(size_t size, gfp_t flags) 479 { 480 if (__builtin_constant_p(size)) { 481 if (size > KMALLOC_MAX_CACHE_SIZE) 482 return kmalloc_large(size, flags); 483 #ifndef CONFIG_SLOB 484 if (!(flags & GFP_DMA)) { 485 int index = kmalloc_index(size); 486 487 if (!index) 488 return ZERO_SIZE_PTR; 489 490 return kmem_cache_alloc_trace(kmalloc_caches[index], 491 flags, size); 492 } 493 #endif 494 } 495 return __kmalloc(size, flags); 496 } 497 498 /* 499 * Determine size used for the nth kmalloc cache. 500 * return size or 0 if a kmalloc cache for that 501 * size does not exist 502 */ 503 static __always_inline int kmalloc_size(int n) 504 { 505 #ifndef CONFIG_SLOB 506 if (n > 2) 507 return 1 << n; 508 509 if (n == 1 && KMALLOC_MIN_SIZE <= 32) 510 return 96; 511 512 if (n == 2 && KMALLOC_MIN_SIZE <= 64) 513 return 192; 514 #endif 515 return 0; 516 } 517 518 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) 519 { 520 #ifndef CONFIG_SLOB 521 if (__builtin_constant_p(size) && 522 size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) { 523 int i = kmalloc_index(size); 524 525 if (!i) 526 return ZERO_SIZE_PTR; 527 528 return kmem_cache_alloc_node_trace(kmalloc_caches[i], 529 flags, node, size); 530 } 531 #endif 532 return __kmalloc_node(size, flags, node); 533 } 534 535 struct memcg_cache_array { 536 struct rcu_head rcu; 537 struct kmem_cache *entries[0]; 538 }; 539 540 /* 541 * This is the main placeholder for memcg-related information in kmem caches. 542 * Both the root cache and the child caches will have it. For the root cache, 543 * this will hold a dynamically allocated array large enough to hold 544 * information about the currently limited memcgs in the system. To allow the 545 * array to be accessed without taking any locks, on relocation we free the old 546 * version only after a grace period. 547 * 548 * Child caches will hold extra metadata needed for its operation. Fields are: 549 * 550 * @memcg: pointer to the memcg this cache belongs to 551 * @root_cache: pointer to the global, root cache, this cache was derived from 552 * 553 * Both root and child caches of the same kind are linked into a list chained 554 * through @list. 
555 */ 556 struct memcg_cache_params { 557 bool is_root_cache; 558 struct list_head list; 559 union { 560 struct memcg_cache_array __rcu *memcg_caches; 561 struct { 562 struct mem_cgroup *memcg; 563 struct kmem_cache *root_cache; 564 }; 565 }; 566 }; 567 568 int memcg_update_all_caches(int num_memcgs); 569 570 /** 571 * kmalloc_array - allocate memory for an array. 572 * @n: number of elements. 573 * @size: element size. 574 * @flags: the type of memory to allocate (see kmalloc). 575 */ 576 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) 577 { 578 if (size != 0 && n > SIZE_MAX / size) 579 return NULL; 580 if (__builtin_constant_p(n) && __builtin_constant_p(size)) 581 return kmalloc(n * size, flags); 582 return __kmalloc(n * size, flags); 583 } 584 585 /** 586 * kcalloc - allocate memory for an array. The memory is set to zero. 587 * @n: number of elements. 588 * @size: element size. 589 * @flags: the type of memory to allocate (see kmalloc). 590 */ 591 static inline void *kcalloc(size_t n, size_t size, gfp_t flags) 592 { 593 return kmalloc_array(n, size, flags | __GFP_ZERO); 594 } 595 596 /* 597 * kmalloc_track_caller is a special version of kmalloc that records the 598 * calling function of the routine calling it for slab leak tracking instead 599 * of just the calling function (confusing, eh?). 600 * It's useful when the call to kmalloc comes from a widely-used standard 601 * allocator where we care about the real place the memory allocation 602 * request comes from. 603 */ 604 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); 605 #define kmalloc_track_caller(size, flags) \ 606 __kmalloc_track_caller(size, flags, _RET_IP_) 607 608 #ifdef CONFIG_NUMA 609 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); 610 #define kmalloc_node_track_caller(size, flags, node) \ 611 __kmalloc_node_track_caller(size, flags, node, \ 612 _RET_IP_) 613 614 #else /* CONFIG_NUMA */ 615 616 #define kmalloc_node_track_caller(size, flags, node) \ 617 kmalloc_track_caller(size, flags) 618 619 #endif /* CONFIG_NUMA */ 620 621 /* 622 * Shortcuts 623 */ 624 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) 625 { 626 return kmem_cache_alloc(k, flags | __GFP_ZERO); 627 } 628 629 /** 630 * kzalloc - allocate memory. The memory is set to zero. 631 * @size: how many bytes of memory are required. 632 * @flags: the type of memory to allocate (see kmalloc). 633 */ 634 static inline void *kzalloc(size_t size, gfp_t flags) 635 { 636 return kmalloc(size, flags | __GFP_ZERO); 637 } 638 639 /** 640 * kzalloc_node - allocate zeroed memory from a particular memory node. 641 * @size: how many bytes of memory are required. 642 * @flags: the type of memory to allocate (see kmalloc). 643 * @node: memory node from which to allocate 644 */ 645 static inline void *kzalloc_node(size_t size, gfp_t flags, int node) 646 { 647 return kmalloc_node(size, flags | __GFP_ZERO, node); 648 } 649 650 unsigned int kmem_cache_size(struct kmem_cache *s); 651 void __init kmem_cache_init_late(void); 652 653 #if defined(CONFIG_SMP) && defined(CONFIG_SLAB) 654 int slab_prepare_cpu(unsigned int cpu); 655 int slab_dead_cpu(unsigned int cpu); 656 #else 657 #define slab_prepare_cpu NULL 658 #define slab_dead_cpu NULL 659 #endif 660 661 #endif /* _LINUX_SLAB_H */
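
Finally, slab.h above provides the two allocation paths a driver usually mixes: the generic kmalloc() array and a dedicated kmem_cache. A short sketch under those APIs (all names illustrative, not from the driver under analysis):

#include <linux/slab.h>
#include <linux/errno.h>

struct example_obj {
        int id;
        char name[16];
};

static int example_slab_usage(void)
{
        struct kmem_cache *cache;
        struct example_obj *obj;
        void *buf;

        /* kzalloc() is kmalloc() with __GFP_ZERO OR'ed in, per the header. */
        buf = kzalloc(128, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        kfree(buf);

        /* KMEM_CACHE() derives the cache name and alignment from the type. */
        cache = KMEM_CACHE(example_obj, SLAB_HWCACHE_ALIGN);
        if (!cache)
                return -ENOMEM;

        obj = kmem_cache_zalloc(cache, GFP_KERNEL);
        if (obj)
                kmem_cache_free(cache, obj);
        kmem_cache_destroy(cache);
        return 0;
}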

Below is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.

Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether your driver actually contains an error.

The Error trace column shows the path along which the given rule is violated. You can expand or collapse whole entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand or collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking one opens the corresponding line in the source code.

The Source code column shows the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking one displays that file's contents.

Kernel:              linux-4.9-rc1.tar.xz
Module:              drivers/uio/uio_pruss.ko
Rule:                320_7a
Verifier:            CPAchecker
Verdict:             Bug
Status:              Fixed
Creation time:       2016-11-26 00:59:52
Problem description: L0258

Comment

Reported: 26 Nov 2016

[To the top]