Bug


Error #151

Error trace
__CPAchecker_initialize()
{
20 typedef unsigned char __u8;
23 typedef unsigned short __u16;
25 typedef int __s32;
26 typedef unsigned int __u32;
29 typedef long long __s64;
30 typedef unsigned long long __u64;
15 typedef signed char s8;
16 typedef unsigned char u8;
19 typedef unsigned short u16;
21 typedef int s32;
22 typedef unsigned int u32;
24 typedef long long s64;
25 typedef unsigned long long u64;
14 typedef long __kernel_long_t;
15 typedef unsigned long __kernel_ulong_t;
27 typedef int __kernel_pid_t;
48 typedef unsigned int __kernel_uid32_t;
49 typedef unsigned int __kernel_gid32_t;
71 typedef __kernel_ulong_t __kernel_size_t;
72 typedef __kernel_long_t __kernel_ssize_t;
87 typedef long long __kernel_loff_t;
88 typedef __kernel_long_t __kernel_time_t;
89 typedef __kernel_long_t __kernel_clock_t;
90 typedef int __kernel_timer_t;
91 typedef int __kernel_clockid_t;
257 struct kernel_symbol { unsigned long value; const char *name; } ;
33 struct module ;
12 typedef __u32 __kernel_dev_t;
15 typedef __kernel_dev_t dev_t;
18 typedef unsigned short umode_t;
21 typedef __kernel_pid_t pid_t;
26 typedef __kernel_clockid_t clockid_t;
29 typedef _Bool bool;
31 typedef __kernel_uid32_t uid_t;
32 typedef __kernel_gid32_t gid_t;
45 typedef __kernel_loff_t loff_t;
54 typedef __kernel_size_t size_t;
59 typedef __kernel_ssize_t ssize_t;
69 typedef __kernel_time_t time_t;
102 typedef __s32 int32_t;
108 typedef __u32 uint32_t;
133 typedef unsigned long sector_t;
134 typedef unsigned long blkcnt_t;
152 typedef u64 dma_addr_t;
157 typedef unsigned int gfp_t;
158 typedef unsigned int fmode_t;
161 typedef u64 phys_addr_t;
166 typedef phys_addr_t resource_size_t;
176 struct __anonstruct_atomic_t_6 { int counter; } ;
176 typedef struct __anonstruct_atomic_t_6 atomic_t;
181 struct __anonstruct_atomic64_t_7 { long counter; } ;
181 typedef struct __anonstruct_atomic64_t_7 atomic64_t;
182 struct list_head { struct list_head *next; struct list_head *prev; } ;
187 struct hlist_node ;
187 struct hlist_head { struct hlist_node *first; } ;
191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ;
202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ;
125 typedef void (*ctor_fn_t)();
279 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ;
58 struct device ;
467 struct file_operations ;
479 struct completion ;
480 struct pt_regs ;
27 union __anonunion___u_9 { struct list_head *__val; char __c[1U]; } ;
189 union __anonunion___u_13 { struct list_head *__val; char __c[1U]; } ;
556 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ;
111 struct timespec ;
112 struct compat_timespec ;
113 struct __anonstruct_futex_25 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ;
113 struct __anonstruct_nanosleep_26 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ;
113 struct pollfd ;
113 struct __anonstruct_poll_27 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ;
113 union __anonunion____missing_field_name_24 { struct __anonstruct_futex_25 futex; struct __anonstruct_nanosleep_26 nanosleep; struct __anonstruct_poll_27 poll; } ;
113 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_24 __annonCompField4; } ;
39 struct page ;
26 struct task_struct ;
27 struct mm_struct ;
288 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ;
66 struct __anonstruct____missing_field_name_30 { unsigned int a; unsigned int b; } ;
66 struct __anonstruct____missing_field_name_31 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ;
66 union __anonunion____missing_field_name_29 { struct __anonstruct____missing_field_name_30 __annonCompField5; struct __anonstruct____missing_field_name_31 __annonCompField6; } ;
66 struct desc_struct { union __anonunion____missing_field_name_29 __annonCompField7; } ;
13 typedef unsigned long pteval_t;
14 typedef unsigned long pmdval_t;
16 typedef unsigned long pgdval_t;
17 typedef unsigned long pgprotval_t;
19 struct __anonstruct_pte_t_32 { pteval_t pte; } ;
19 typedef struct __anonstruct_pte_t_32 pte_t;
21 struct pgprot { pgprotval_t pgprot; } ;
256 typedef struct pgprot pgprot_t;
258 struct __anonstruct_pgd_t_33 { pgdval_t pgd; } ;
258 typedef struct __anonstruct_pgd_t_33 pgd_t;
297 struct __anonstruct_pmd_t_35 { pmdval_t pmd; } ;
297 typedef struct __anonstruct_pmd_t_35 pmd_t;
423 typedef struct page *pgtable_t;
434 struct file ;
447 struct seq_file ;
483 struct thread_struct ;
485 struct cpumask ;
20 struct qspinlock { atomic_t val; } ;
33 typedef struct qspinlock arch_spinlock_t;
34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ;
14 typedef struct qrwlock arch_rwlock_t;
247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ;
341 struct cpumask { unsigned long bits[128U]; } ;
15 typedef struct cpumask cpumask_t;
654 typedef struct cpumask *cpumask_var_t;
26 union __anonunion___u_42 { int __val; char __c[1U]; } ;
38 union __anonunion___u_44 { int __val; char __c[1U]; } ;
23 typedef atomic64_t atomic_long_t;
81 struct static_key { atomic_t enabled; } ;
22 struct tracepoint_func { void *func; void *data; int prio; } ;
28 struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ;
254 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ;
26 struct __anonstruct____missing_field_name_59 { u64 rip; u64 rdp; } ;
26 struct __anonstruct____missing_field_name_60 { u32 fip; u32 fcs; u32 foo; u32 fos; } ;
26 union __anonunion____missing_field_name_58 { struct __anonstruct____missing_field_name_59 __annonCompField13; struct __anonstruct____missing_field_name_60 __annonCompField14; } ;
26 union __anonunion____missing_field_name_61 { u32 padding1[12U]; u32 sw_reserved[12U]; } ;
26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_58 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_61 __annonCompField16; } ;
66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ;
227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ;
233 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ;
254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ;
271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; union fpregs_state state; } ;
169 struct seq_operations ;
372 struct perf_event ;
377 struct __anonstruct_mm_segment_t_73 { unsigned long seg; } ;
377 typedef struct __anonstruct_mm_segment_t_73 mm_segment_t;
378 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ;
74 typedef int pao_T_____0;
33 struct lockdep_map ;
55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ;
28 struct lockdep_subclass_key { char __one_byte; } ;
53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ;
59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ;
144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ;
207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ;
572 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
32 typedef struct raw_spinlock raw_spinlock_t;
33 struct __anonstruct____missing_field_name_75 { u8 __padding[24U]; struct lockdep_map dep_map; } ;
33 union __anonunion____missing_field_name_74 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_75 __annonCompField19; } ;
33 struct spinlock { union __anonunion____missing_field_name_74 __annonCompField20; } ;
76 typedef struct spinlock spinlock_t;
23 struct __anonstruct_rwlock_t_76 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ;
23 typedef struct __anonstruct_rwlock_t_76 rwlock_t;
416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ;
52 typedef struct seqcount seqcount_t;
407 struct __anonstruct_seqlock_t_91 { struct seqcount seqcount; spinlock_t lock; } ;
407 typedef struct __anonstruct_seqlock_t_91 seqlock_t;
601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ;
7 typedef __s64 time64_t;
83 struct user_namespace ;
22 struct __anonstruct_kuid_t_92 { uid_t val; } ;
22 typedef struct __anonstruct_kuid_t_92 kuid_t;
27 struct __anonstruct_kgid_t_93 { gid_t val; } ;
27 typedef struct __anonstruct_kgid_t_93 kgid_t;
139 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ;
36 struct vm_area_struct ;
38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ;
43 typedef struct __wait_queue_head wait_queue_head_t;
97 struct __anonstruct_nodemask_t_94 { unsigned long bits[16U]; } ;
97 typedef struct __anonstruct_nodemask_t_94 nodemask_t;
247 typedef unsigned int isolate_mode_t;
13 struct optimistic_spin_queue { atomic_t tail; } ;
39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ;
67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ;
177 struct rw_semaphore ;
178 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ;
178 struct completion { unsigned int done; wait_queue_head_t wait; } ;
446 union ktime { s64 tv64; } ;
41 typedef union ktime ktime_t;
1144 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ;
254 struct hrtimer ;
255 enum hrtimer_restart ;
256 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ;
41 struct rb_root { struct rb_node *rb_node; } ;
835 struct nsproxy ;
278 struct workqueue_struct ;
279 struct work_struct ;
54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ;
107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ;
268 struct notifier_block ;
53 struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; } ;
217 struct resource ;
64 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; unsigned long desc; struct resource *parent; struct resource *sibling; struct resource *child; } ;
58 struct pm_message { int event; } ;
64 typedef struct pm_message pm_message_t;
65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ;
320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ;
327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ;
335 struct wakeup_source ;
336 struct wake_irq ;
337 struct pm_domain_data ;
338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ;
556 struct dev_pm_qos ;
556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ;
616 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ;
26 struct ldt_struct ;
26 struct vdso_image ;
26 struct __anonstruct_mm_context_t_165 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; } ;
26 typedef struct __anonstruct_mm_context_t_165 mm_context_t;
22 struct bio_vec ;
1276 struct llist_node ;
64 struct llist_node { struct llist_node *next; } ;
37 struct cred ;
19 struct inode ;
58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ;
66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ;
73 struct __anonstruct____missing_field_name_211 { struct arch_uprobe_task autask; unsigned long vaddr; } ;
73 struct __anonstruct____missing_field_name_212 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ;
73 union __anonunion____missing_field_name_210 { struct __anonstruct____missing_field_name_211 __annonCompField35; struct __anonstruct____missing_field_name_212 __annonCompField36; } ;
73 struct uprobe ;
73 struct return_instance ;
73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_210 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ;
94 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ;
110 struct xol_area ;
111 struct uprobes_state { struct xol_area *xol_area; } ;
150 struct address_space ;
151 struct mem_cgroup ;
152 union __anonunion____missing_field_name_213 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ;
152 union __anonunion____missing_field_name_214 { unsigned long index; void *freelist; } ;
152 struct __anonstruct____missing_field_name_218 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ;
152 union __anonunion____missing_field_name_217 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_218 __annonCompField40; int units; } ;
152 struct __anonstruct____missing_field_name_216 { union __anonunion____missing_field_name_217 __annonCompField41; atomic_t _refcount; } ;
152 union __anonunion____missing_field_name_215 { unsigned long counters; struct __anonstruct____missing_field_name_216 __annonCompField42; } ;
152 struct dev_pagemap ;
152 struct __anonstruct____missing_field_name_220 { struct page *next; int pages; int pobjects; } ;
152 struct __anonstruct____missing_field_name_221 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ;
152 struct __anonstruct____missing_field_name_222 { unsigned long __pad; pgtable_t pmd_huge_pte; } ;
152 union __anonunion____missing_field_name_219 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_220 __annonCompField44; struct callback_head callback_head; struct __anonstruct____missing_field_name_221 __annonCompField45; struct __anonstruct____missing_field_name_222 __annonCompField46; } ;
152 struct kmem_cache ;
152 union __anonunion____missing_field_name_223 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ;
152 struct page { unsigned long flags; union __anonunion____missing_field_name_213 __annonCompField38; union __anonunion____missing_field_name_214 __annonCompField39; union __anonunion____missing_field_name_215 __annonCompField43; union __anonunion____missing_field_name_219 __annonCompField47; union __anonunion____missing_field_name_223 __annonCompField48; struct mem_cgroup *mem_cgroup; } ;
197 struct page_frag { struct page *page; __u32 offset; __u32 size; } ;
282 struct userfaultfd_ctx ;
282 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ;
289 struct __anonstruct_shared_224 { struct rb_node rb; unsigned long rb_subtree_last; } ;
289 struct anon_vma ;
289 struct vm_operations_struct ;
289 struct mempolicy ;
289 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_224 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ;
362 struct core_thread { struct task_struct *task; struct core_thread *next; } ;
367 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ;
381 struct task_rss_stat { int events; int count[4U]; } ;
389 struct mm_rss_stat { atomic_long_t count[4U]; } ;
394 struct kioctx_table ;
395 struct linux_binfmt ;
395 struct mmu_notifier_mm ;
395 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ;
565 struct vm_fault ;
619 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ;
15 typedef __u64 Elf64_Addr;
16 typedef __u16 Elf64_Half;
18 typedef __u64 Elf64_Off;
20 typedef __u32 Elf64_Word;
21 typedef __u64 Elf64_Xword;
190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ;
198 typedef struct elf64_sym Elf64_Sym;
219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ;
235 typedef struct elf64_hdr Elf64_Ehdr;
314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ;
326 typedef struct elf64_shdr Elf64_Shdr;
53 union __anonunion____missing_field_name_229 { unsigned long bitmap[4U]; struct callback_head callback_head; } ;
53 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_229 __annonCompField49; } ;
41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ;
117 union __anonunion___u_231 { struct idr_layer *__val; char __c[1U]; } ;
120 union __anonunion___u_233 { struct idr_layer *__val; char __c[1U]; } ;
124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ;
167 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ;
199 struct dentry ;
200 struct iattr ;
201 struct super_block ;
202 struct file_system_type ;
203 struct kernfs_open_node ;
204 struct kernfs_iattrs ;
227 struct kernfs_root ;
227 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ;
85 struct kernfs_node ;
85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ;
89 struct kernfs_ops ;
89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ;
96 union __anonunion____missing_field_name_234 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ;
96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_234 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ;
138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ;
157 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ;
173 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ;
191 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ;
499 struct sock ;
500 struct kobject ;
501 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ;
507 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ;
59 struct bin_attribute ;
60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ;
37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ;
92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ;
165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ;
530 struct kref { atomic_t refcount; } ;
52 struct kset ;
52 struct kobj_type ;
52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ;
115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ;
123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ;
131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ;
148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ;
223 struct kernel_param ;
228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ;
62 struct kparam_string ;
62 struct kparam_array ;
62 union __anonunion____missing_field_name_237 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ;
62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_237 __annonCompField51; } ;
83 struct kparam_string { unsigned int maxlen; char *string; } ;
89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ;
470 struct exception_table_entry ;
24 struct latch_tree_node { struct rb_node node[2U]; } ;
211 struct mod_arch_specific { } ;
39 struct module_param_attrs ;
39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ;
50 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ;
277 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ;
284 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ;
291 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ;
307 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ;
321 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ;
329 struct module_sect_attrs ;
329 struct module_notes_attrs ;
329 struct trace_event_call ;
329 struct trace_enum_map ;
329 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; unsigned int num_ftrace_callsites; unsigned long *ftrace_callsites; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ;
22 struct kernel_cap_struct { __u32 cap[2U]; } ;
25 typedef struct kernel_cap_struct kernel_cap_t;
84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ;
4 typedef unsigned long cputime_t;
25 struct sem_undo_list ;
25 struct sysv_sem { struct sem_undo_list *undo_list; } ;
78 struct user_struct ;
26 struct sysv_shm { struct list_head shm_clist; } ;
24 struct __anonstruct_sigset_t_245 { unsigned long sig[1U]; } ;
24 typedef struct __anonstruct_sigset_t_245 sigset_t;
25 struct siginfo ;
17 typedef void __signalfn_t(int);
18 typedef __signalfn_t *__sighandler_t;
20 typedef void __restorefn_t();
21 typedef __restorefn_t *__sigrestore_t;
34 union sigval { int sival_int; void *sival_ptr; } ;
10 typedef union sigval sigval_t;
11 struct __anonstruct__kill_247 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ;
11 struct __anonstruct__timer_248 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ;
11 struct __anonstruct__rt_249 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ;
11 struct __anonstruct__sigchld_250 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ;
11 struct __anonstruct__addr_bnd_253 { void *_lower; void *_upper; } ;
11 union __anonunion____missing_field_name_252 { struct __anonstruct__addr_bnd_253 _addr_bnd; __u32 _pkey; } ;
11 struct __anonstruct__sigfault_251 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_252 __annonCompField52; } ;
11 struct __anonstruct__sigpoll_254 { long _band; int _fd; } ;
11 struct __anonstruct__sigsys_255 { void *_call_addr; int _syscall; unsigned int _arch; } ;
11 union __anonunion__sifields_246 { int _pad[28U]; struct __anonstruct__kill_247 _kill; struct __anonstruct__timer_248 _timer; struct __anonstruct__rt_249 _rt; struct __anonstruct__sigchld_250 _sigchld; struct __anonstruct__sigfault_251 _sigfault; struct __anonstruct__sigpoll_254 _sigpoll; struct __anonstruct__sigsys_255 _sigsys; } ;
11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_246 _sifields; } ;
118 typedef struct siginfo siginfo_t;
22 struct sigpending { struct list_head list; sigset_t signal; } ;
257 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ;
271 struct k_sigaction { struct sigaction sa; } ;
457 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ;
464 struct pid_namespace ;
464 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ;
56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ;
68 struct pid_link { struct hlist_node node; struct pid *pid; } ;
43 struct seccomp_filter ;
44 struct seccomp { int mode; struct seccomp_filter *filter; } ;
40 struct rt_mutex_waiter ;
41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ;
11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ;
12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ;
50 struct hrtimer_clock_base ;
51 struct hrtimer_cpu_base ;
60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ;
65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; } ;
125 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ;
158 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ;
12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ;
17 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ;
45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ;
41 struct assoc_array_ptr ;
41 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ;
31 typedef int32_t key_serial_t;
34 typedef uint32_t key_perm_t;
35 struct key ;
36 struct signal_struct ;
37 struct key_type ;
41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ;
91 union key_payload { void *rcu_data0; void *data[4U]; } ;
128 union __anonunion____missing_field_name_290 { struct list_head graveyard_link; struct rb_node serial_node; } ;
128 struct key_user ;
128 union __anonunion____missing_field_name_291 { time_t expiry; time_t revoked_at; } ;
128 struct __anonstruct____missing_field_name_293 { struct key_type *type; char *description; } ;
128 union __anonunion____missing_field_name_292 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_293 __annonCompField55; } ;
128 struct __anonstruct____missing_field_name_295 { struct list_head name_link; struct assoc_array keys; } ;
128 union __anonunion____missing_field_name_294 { union key_payload payload; struct __anonstruct____missing_field_name_295 __annonCompField57; int reject_error; } ;
128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_290 __annonCompField53; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_291 __annonCompField54; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_292 __annonCompField56; union __anonunion____missing_field_name_294 __annonCompField58; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ;
377 struct audit_context ;
27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ;
90 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ;
377 struct percpu_ref ;
55 typedef void percpu_ref_func_t(struct percpu_ref *);
68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ;
325 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ;
331 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ;
65 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *fast_read_ctr; struct rw_semaphore rw_sem; atomic_t slow_read_ctr; wait_queue_head_t write_waitq; } ;
54 struct cgroup ;
55 struct cgroup_root ;
56 struct cgroup_subsys ;
57 struct cgroup_taskset ;
101 struct cgroup_file { struct kernfs_node *kn; } ;
90 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ;
141 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; } ;
221 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; int ancestor_ids[]; } ;
306 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ;
345 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ;
430 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ;
128 struct futex_pi_state ;
129 struct robust_list_head ;
130 struct bio_list ;
131 struct fs_struct ;
132 struct perf_event_context ;
133 struct blk_plug ;
135 struct nameidata ;
188 struct cfs_rq ;
189 struct task_group ;
493 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ;
536 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ;
544 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ;
551 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ;
576 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ;
592 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ;
614 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ;
659 struct autogroup ;
660 struct tty_struct ;
660 struct taskstats ;
660 struct tty_audit_buf ;
660 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; atomic_t oom_victims; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ;
835 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ;
880 struct backing_dev_info ;
881 struct reclaim_state ;
882 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ;
896 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ;
953 struct wake_q_node { struct wake_q_node *next; } ;
1185 struct io_context ;
1219 struct pipe_inode_info ;
1221 struct load_weight { unsigned long weight; u32 inv_weight; } ;
1228 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ;
1286 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ;
1321 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ;
1358 struct rt_rq ;
1358 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ;
1376 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ;
1440 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ;
1459 struct sched_class ;
1459 struct files_struct ;
1459 struct compat_robust_list_head ;
1459 struct numa_group ;
1459 struct ftrace_ret_stack ;
1459 struct kcov ;
1459 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int btrace_seq; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; int curr_ret_stack; struct ftrace_ret_stack *ret_stack; unsigned long long ftrace_timestamp; atomic_t trace_overrun; atomic_t tracing_graph_pause; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; struct thread_struct thread; } ;
22 struct klist_node ;
37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ;
93 struct hlist_bl_node ;
93 struct hlist_bl_head { struct hlist_bl_node *first; } ;
36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ;
114 struct __anonstruct____missing_field_name_337 { spinlock_t lock; int count; } ;
114 union __anonunion____missing_field_name_336 { struct __anonstruct____missing_field_name_337 __annonCompField64; } ;
114 struct lockref { union __anonunion____missing_field_name_336 __annonCompField65; } ;
77 struct path ;
78 struct vfsmount ;
79 struct __anonstruct____missing_field_name_339 { u32 hash; u32 len; } ;
79 union __anonunion____missing_field_name_338 { struct __anonstruct____missing_field_name_339 __annonCompField66; u64 hash_len; } ;
79 struct qstr { union __anonunion____missing_field_name_338 __annonCompField67; const unsigned char *name; } ;
65 struct dentry_operations ;
65 union __anonunion____missing_field_name_340 { struct list_head d_lru; wait_queue_head_t *d_wait; } ;
65 union __anonunion_d_u_341 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ;
65 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_340 __annonCompField68; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_341 d_u; } ;
121 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ;
591 struct path { struct vfsmount *mnt; struct dentry *dentry; } ;
19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ;
27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ;
80 struct list_lru_one { struct list_head list; long nr_items; } ;
32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ;
37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ;
47 struct list_lru { struct list_lru_node *node; struct list_head list; } ;
63 struct __anonstruct____missing_field_name_343 { struct radix_tree_node *parent; void *private_data; } ;
63 union __anonunion____missing_field_name_342 { struct __anonstruct____missing_field_name_343 __annonCompField69; struct callback_head callback_head; } ;
63 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned int count; union __anonunion____missing_field_name_342 __annonCompField70; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ;
106 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ;
45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ;
38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ;
44 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ;
87 struct block_device ;
266 struct delayed_call { void (*fn)(void *); void *arg; } ;
261 struct bdi_writeback ;
262 struct export_operations ;
264 struct iovec ;
265 struct kiocb ;
266 struct poll_table_struct ;
267 struct kstatfs ;
268 struct swap_info_struct ;
269 struct iov_iter ;
270 struct fscrypt_info ;
271 struct fscrypt_operations ;
76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ;
213 struct dquot ;
214 struct kqid ;
19 typedef __kernel_uid32_t projid_t;
23 struct __anonstruct_kprojid_t_351 { projid_t val; } ;
23 typedef struct __anonstruct_kprojid_t_351 kprojid_t;
181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ;
66 typedef long long qsize_t;
67 union __anonunion____missing_field_name_352 { kuid_t uid; kgid_t gid; kprojid_t projid; } ;
67 struct kqid { union __anonunion____missing_field_name_352 __annonCompField72; enum quota_type type; } ;
194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ;
216 struct quota_format_type ;
217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ;
282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ;
309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ;
338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ;
361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ;
407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ;
418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ;
431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ;
447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ;
511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ;
541 struct writeback_control ;
542 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ;
367 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ;
426 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; struct list_head private_list; void *private_data; } ;
447 struct request_queue ;
448 struct hd_struct ;
448 struct gendisk ;
448 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ;
563 struct posix_acl ;
589 struct inode_operations ;
589 union __anonunion____missing_field_name_357 { const unsigned int i_nlink; unsigned int __i_nlink; } ;
589 union __anonunion____missing_field_name_358 { struct hlist_head i_dentry; struct callback_head i_rcu; } ;
589 struct file_lock_context ;
589 struct cdev ;
589 union __anonunion____missing_field_name_359 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ;
589 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_357 __annonCompField73; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_358 __annonCompField74; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_359 __annonCompField75; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ;
843 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ;
851 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ;
874 union __anonunion_f_u_360 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ;
874 struct file { union __anonunion_f_u_360 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ;
959 typedef void *fl_owner_t;
960 struct file_lock ;
961 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ;
967 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ;
994 struct nlm_lockowner ;
995 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ;
14 struct nfs4_lock_state ;
15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ;
19 struct fasync_struct ;
19 struct __anonstruct_afs_362 { struct list_head link; int state; } ;
19 union __anonunion_fl_u_361 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_362 afs; } ;
19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_361 fl_u; } ;
1047 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ;
1255 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ;
1290 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ;
1320 struct super_operations ;
1320 struct xattr_handler ;
1320 struct mtd_info ;
1320 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ;
1603 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ;
1616 struct dir_context ;
1641 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ;
1648 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ;
1717 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, struct inode *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, struct inode *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ;
1774 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ;
2018 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ;
3193 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ;
30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ;
222 struct pinctrl ;
223 struct pinctrl_state ;
194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ;
76 struct dma_map_ops ;
76 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ;
24 struct device_private ;
25 struct device_driver ;
26 struct driver_private ;
27 struct class ;
28 struct subsys_private ;
29 struct bus_type ;
30 struct device_node ;
31 struct fwnode_handle ;
32 struct iommu_ops ;
33 struct iommu_group ;
61 struct device_attribute ;
61 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ;
142 struct device_type ;
201 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ;
207 struct of_device_id ;
207 struct acpi_device_id ;
207 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ;
327 struct subsys_interface { const char *name; struct bus_type *subsys; struct list_head node; int (*add_dev)(struct device *, struct subsys_interface *); void (*remove_dev)(struct device *, struct subsys_interface *); } ;
357 struct class_attribute ;
357 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ;
450 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ;
501 struct class_interface { struct list_head node; struct class *class; int (*add_dev)(struct device *, struct class_interface *); void (*remove_dev)(struct device *, struct class_interface *); } ;
518 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ;
546 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ;
699 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ;
708 struct irq_domain ;
708 struct dma_coherent_mem ;
708 struct cma ;
708 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ;
862 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ;
1327 struct scatterlist ;
89 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ;
273 struct vm_fault { unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; void *virtual_address; struct page *cow_page; struct page *page; void *entry; } ;
308 struct fault_env { struct vm_area_struct *vma; unsigned long address; unsigned int flags; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ;
335 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct fault_env *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ;
1348 struct kvec ;
2451 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ;
21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ;
158 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ;
13 typedef unsigned long kernel_ulong_t;
186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ;
229 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ;
629 struct rio_device_id { __u16 did; __u16 vid; __u16 asm_did; __u16 asm_vid; } ;
674 struct iovec { void *iov_base; __kernel_size_t iov_len; } ;
21 struct kvec { void *iov_base; size_t iov_len; } ;
27 union __anonunion____missing_field_name_376 { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; } ;
27 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion____missing_field_name_376 __annonCompField76; unsigned long nr_segs; } ;
34 typedef s32 dma_cookie_t;
41 enum dma_status { DMA_COMPLETE = 0, DMA_IN_PROGRESS = 1, DMA_PAUSED = 2, DMA_ERROR = 3 } ;
66 enum dma_transfer_direction { DMA_MEM_TO_MEM = 0, DMA_MEM_TO_DEV = 1, DMA_DEV_TO_MEM = 2, DMA_DEV_TO_DEV = 3, DMA_TRANS_NONE = 4 } ;
74 struct data_chunk { size_t size; size_t icg; size_t dst_icg; size_t src_icg; } ;
140 struct dma_interleaved_template { dma_addr_t src_start; dma_addr_t dst_start; enum dma_transfer_direction dir; bool src_inc; bool dst_inc; bool src_sgl; bool dst_sgl; size_t numf; size_t frame_size; struct data_chunk sgl[0U]; } ;
171 enum dma_ctrl_flags { DMA_PREP_INTERRUPT = 1, DMA_CTRL_ACK = 2, DMA_PREP_PQ_DISABLE_P = 4, DMA_PREP_PQ_DISABLE_Q = 8, DMA_PREP_CONTINUE = 16, DMA_PREP_FENCE = 32, DMA_CTRL_REUSE = 64 } ;
186 enum sum_check_flags { SUM_CHECK_P_RESULT = 1, SUM_CHECK_Q_RESULT = 2 } ;
223 struct __anonstruct_dma_cap_mask_t_377 { unsigned long bits[1U]; } ;
223 typedef struct __anonstruct_dma_cap_mask_t_377 dma_cap_mask_t;
225 struct dma_chan_percpu { unsigned long memcpy_count; unsigned long bytes_transferred; } ;
236 struct dma_router { struct device *dev; void (*route_free)(struct device *, void *); } ;
246 struct dma_device ;
246 struct dma_chan_dev ;
246 struct dma_chan { struct dma_device *device; dma_cookie_t cookie; dma_cookie_t completed_cookie; int chan_id; struct dma_chan_dev *dev; struct list_head device_node; struct dma_chan_percpu *local; int client_count; int table_count; struct dma_router *router; void *route_data; void *private; } ;
282 struct dma_chan_dev { struct dma_chan *chan; struct device device; int dev_id; atomic_t *idr_ref; } ;
296 enum dma_slave_buswidth { DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, DMA_SLAVE_BUSWIDTH_1_BYTE = 1, DMA_SLAVE_BUSWIDTH_2_BYTES = 2, DMA_SLAVE_BUSWIDTH_3_BYTES = 3, DMA_SLAVE_BUSWIDTH_4_BYTES = 4, DMA_SLAVE_BUSWIDTH_8_BYTES = 8, DMA_SLAVE_BUSWIDTH_16_BYTES = 16, DMA_SLAVE_BUSWIDTH_32_BYTES = 32, DMA_SLAVE_BUSWIDTH_64_BYTES = 64 } ;
308 struct dma_slave_config { enum dma_transfer_direction direction; phys_addr_t src_addr; phys_addr_t dst_addr; enum dma_slave_buswidth src_addr_width; enum dma_slave_buswidth dst_addr_width; u32 src_maxburst; u32 dst_maxburst; bool device_fc; unsigned int slave_id; } ;
369 enum dma_residue_granularity { DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0, DMA_RESIDUE_GRANULARITY_SEGMENT = 1, DMA_RESIDUE_GRANULARITY_BURST = 2 } ;
443 struct dmaengine_unmap_data { u8 map_cnt; u8 to_cnt; u8 from_cnt; u8 bidi_cnt; struct device *dev; struct kref kref; size_t len; dma_addr_t addr[0U]; } ;
454 struct dma_async_tx_descriptor { dma_cookie_t cookie; enum dma_ctrl_flags flags; dma_addr_t phys; struct dma_chan *chan; dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *); int (*desc_free)(struct dma_async_tx_descriptor *); void (*callback)(void *); void *callback_param; struct dmaengine_unmap_data *unmap; struct dma_async_tx_descriptor *next; struct dma_async_tx_descriptor *parent; spinlock_t lock; } ;
580 struct dma_tx_state { dma_cookie_t last; dma_cookie_t used; u32 residue; } ;
596 enum dmaengine_alignment { DMAENGINE_ALIGN_1_BYTE = 0, DMAENGINE_ALIGN_2_BYTES = 1, DMAENGINE_ALIGN_4_BYTES = 2, DMAENGINE_ALIGN_8_BYTES = 3, DMAENGINE_ALIGN_16_BYTES = 4, DMAENGINE_ALIGN_32_BYTES = 5, DMAENGINE_ALIGN_64_BYTES = 6 } ;
606 struct dma_slave_map { const char *devname; const char *slave; void *param; } ;
623 struct dma_filter { bool (*fn)(struct dma_chan *, void *); int mapcnt; const struct dma_slave_map *map; } ;
636 struct dma_device { unsigned int chancnt; unsigned int privatecnt; struct list_head channels; struct list_head global_node; struct dma_filter filter; dma_cap_mask_t cap_mask; unsigned short max_xor; unsigned short max_pq; enum dmaengine_alignment copy_align; enum dmaengine_alignment xor_align; enum dmaengine_alignment pq_align; enum dmaengine_alignment fill_align; int dev_id; struct device *dev; u32 src_addr_widths; u32 dst_addr_widths; u32 directions; u32 max_burst; bool descriptor_reuse; enum dma_residue_granularity residue_granularity; int (*device_alloc_chan_resources)(struct dma_chan *); void (*device_free_chan_resources)(struct dma_chan *); struct dma_async_tx_descriptor * (*device_prep_dma_memcpy)(struct dma_chan *, dma_addr_t , dma_addr_t , size_t , unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_xor)(struct dma_chan *, dma_addr_t , dma_addr_t *, unsigned int, size_t , unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_xor_val)(struct dma_chan *, dma_addr_t *, unsigned int, size_t , enum sum_check_flags *, unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_pq)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t , unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_pq_val)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t , enum sum_check_flags *, unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_memset)(struct dma_chan *, dma_addr_t , int, size_t , unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_memset_sg)(struct dma_chan *, struct scatterlist *, unsigned int, int, unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_interrupt)(struct dma_chan *, unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_sg)(struct dma_chan *, struct scatterlist *, unsigned int, struct scatterlist *, unsigned int, unsigned long); struct dma_async_tx_descriptor * (*device_prep_slave_sg)(struct dma_chan *, struct scatterlist *, unsigned int, enum dma_transfer_direction , unsigned long, void *); struct dma_async_tx_descriptor * (*device_prep_dma_cyclic)(struct dma_chan *, dma_addr_t , size_t , size_t , enum dma_transfer_direction , unsigned long); struct dma_async_tx_descriptor * (*device_prep_interleaved_dma)(struct dma_chan *, struct dma_interleaved_template *, unsigned long); struct dma_async_tx_descriptor * (*device_prep_dma_imm_data)(struct dma_chan *, dma_addr_t , u64 , unsigned long); int (*device_config)(struct dma_chan *, struct dma_slave_config *); int (*device_pause)(struct dma_chan *); int (*device_resume)(struct dma_chan *); int (*device_terminate_all)(struct dma_chan *); void (*device_synchronize)(struct dma_chan *); enum dma_status (*device_tx_status)(struct dma_chan *, dma_cookie_t , struct dma_tx_state *); void (*device_issue_pending)(struct dma_chan *); } ;
87 struct rio_mport ;
88 struct rio_dev ;
89 union rio_pw_msg ;
90 struct rio_switch_ops ;
90 struct rio_switch { struct list_head node; u8 *route_table; u32 port_ok; struct rio_switch_ops *ops; spinlock_t lock; struct rio_dev *nextdev[0U]; } ;
109 struct rio_switch_ops { struct module *owner; int (*add_entry)(struct rio_mport *, u16 , u8 , u16 , u16 , u8 ); int (*get_entry)(struct rio_mport *, u16 , u8 , u16 , u16 , u8 *); int (*clr_table)(struct rio_mport *, u16 , u8 , u16 ); int (*set_domain)(struct rio_mport *, u16 , u8 , u8 ); int (*get_domain)(struct rio_mport *, u16 , u8 , u8 *); int (*em_init)(struct rio_dev *); int (*em_handle)(struct rio_dev *, u8 ); } ;
146 struct rio_net ;
146 struct rio_driver ;
146 struct rio_dev { struct list_head global_list; struct list_head net_list; struct rio_net *net; bool do_enum; u16 did; u16 vid; u32 device_rev; u16 asm_did; u16 asm_vid; u16 asm_rev; u16 efptr; u32 pef; u32 swpinfo; u32 src_ops; u32 dst_ops; u32 comp_tag; u32 phys_efptr; u32 phys_rmap; u32 em_efptr; u64 dma_mask; struct rio_driver *driver; struct device dev; struct resource riores[16U]; int (*pwcback)(struct rio_dev *, union rio_pw_msg *, int); u16 destid; u8 hopcount; struct rio_dev *prev; atomic_t state; struct rio_switch rswitch[0U]; } ;
210 struct rio_msg { struct resource *res; void (*mcback)(struct rio_mport *, void *, int, int); } ;
241 struct rio_ops ;
241 struct rio_scan ;
241 struct rio_mport { struct list_head dbells; struct list_head pwrites; struct list_head node; struct list_head nnode; struct rio_net *net; struct mutex lock; struct resource iores; struct resource riores[16U]; struct rio_msg inb_msg[4U]; struct rio_msg outb_msg[4U]; int host_deviceid; struct rio_ops *ops; unsigned char id; unsigned char index; unsigned int sys_size; u32 phys_efptr; u32 phys_rmap; unsigned char name[40U]; struct device dev; void *priv; struct dma_device dma; struct rio_scan *nscan; atomic_t state; unsigned int pwe_refcnt; } ;
306 struct rio_net { struct list_head node; struct list_head devices; struct list_head switches; struct list_head mports; struct rio_mport *hport; unsigned char id; struct device dev; void *enum_data; void (*release)(struct rio_net *); } ;
359 struct rio_mport_attr { int flags; int link_speed; int link_width; int dma_max_sge; int dma_max_size; int dma_align; } ;
379 struct rio_ops { int (*lcread)(struct rio_mport *, int, u32 , int, u32 *); int (*lcwrite)(struct rio_mport *, int, u32 , int, u32 ); int (*cread)(struct rio_mport *, int, u16 , u8 , u32 , int, u32 *); int (*cwrite)(struct rio_mport *, int, u16 , u8 , u32 , int, u32 ); int (*dsend)(struct rio_mport *, int, u16 , u16 ); int (*pwenable)(struct rio_mport *, int); int (*open_outb_mbox)(struct rio_mport *, void *, int, int); void (*close_outb_mbox)(struct rio_mport *, int); int (*open_inb_mbox)(struct rio_mport *, void *, int, int); void (*close_inb_mbox)(struct rio_mport *, int); int (*add_outb_message)(struct rio_mport *, struct rio_dev *, int, void *, size_t ); int (*add_inb_buffer)(struct rio_mport *, int, void *); void * (*get_inb_message)(struct rio_mport *, int); int (*map_inb)(struct rio_mport *, dma_addr_t , u64 , u64 , u32 ); void (*unmap_inb)(struct rio_mport *, dma_addr_t ); int (*query_mport)(struct rio_mport *, struct rio_mport_attr *); int (*map_outb)(struct rio_mport *, u16 , u64 , u32 , u32 , dma_addr_t *); void (*unmap_outb)(struct rio_mport *, u16 , u64 ); } ;
433 struct rio_driver { struct list_head node; char *name; const struct rio_device_id *id_table; int (*probe)(struct rio_dev *, const struct rio_device_id *); void (*remove)(struct rio_dev *); void (*shutdown)(struct rio_dev *); int (*suspend)(struct rio_dev *, u32 ); int (*resume)(struct rio_dev *); int (*enable_wake)(struct rio_dev *, u32 , int); struct device_driver driver; } ;
471 struct __anonstruct_em_378 { u32 comptag; u32 errdetect; u32 is_port; u32 ltlerrdet; u32 padding[12U]; } ;
471 union rio_pw_msg { struct __anonstruct_em_378 em; u32 raw[16U]; } ;
525 struct rio_scan { struct module *owner; int (*enumerate)(struct rio_mport *, u32 ); int (*discover)(struct rio_mport *, u32 ); } ;
63 struct exception_table_entry { int insn; int fixup; int handler; } ;
708 struct cdev { struct kobject kobj; struct module *owner; const struct file_operations *ops; struct list_head list; dev_t dev; unsigned int count; } ;
32 struct pollfd { int fd; short events; short revents; } ;
32 struct poll_table_struct { void (*_qproc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); unsigned long _key; } ;
5 struct rio_cm_channel { __u16 id; __u16 remote_channel; __u16 remote_destid; __u8 mport_id; } ;
49 struct rio_cm_msg { __u16 ch_num; __u16 size; __u32 rxto; __u64 msg; } ;
56 struct rio_cm_accept { __u16 ch_num; __u16 pad0; __u32 wait_to; } ;
98 enum rio_cm_state { RIO_CM_IDLE = 0, RIO_CM_CONNECT = 1, RIO_CM_CONNECTED = 2, RIO_CM_DISCONNECT = 3, RIO_CM_CHAN_BOUND = 4, RIO_CM_LISTEN = 5, RIO_CM_DESTROYING = 6 } ;
120 struct rio_ch_base_bhdr { u32 src_id; u32 dst_id; u8 src_mbox; u8 dst_mbox; u8 type; } ;
138 struct rio_ch_chan_hdr { struct rio_ch_base_bhdr bhdr; u8 ch_op; u16 dst_ch; u16 src_ch; u16 msg_len; u16 rsrvd; } ;
147 struct tx_req { struct list_head node; struct rio_dev *rdev; void *buffer; size_t len; } ;
154 struct cm_dev { struct list_head list; struct rio_mport *mport; void *rx_buf[128U]; int rx_slots; struct mutex rx_lock; void *tx_buf[128U]; int tx_slot; int tx_cnt; int tx_ack_slot; struct list_head tx_reqs; spinlock_t tx_lock; struct list_head peers; u32 npeers; struct workqueue_struct *rx_wq; struct work_struct rx_work; } ;
174 struct chan_rx_ring { void *buf[128U]; int head; int tail; int count; void *inuse[128U]; int inuse_cnt; } ;
185 struct rio_channel { u16 id; struct kref ref; struct file *filp; struct cm_dev *cmdev; struct rio_dev *rdev; enum rio_cm_state state; int error; spinlock_t lock; void *context; u32 loc_destid; u32 rem_destid; u16 rem_channel; struct list_head accept_queue; struct list_head ch_node; struct completion comp; struct completion comp_close; struct chan_rx_ring rx_ring; } ;
205 struct cm_peer { struct list_head node; struct rio_dev *rdev; } ;
216 struct conn_req { struct list_head node; u32 destid; u16 chan; struct cm_dev *cmdev; } ;
223 struct channel_dev { struct cdev cdev; struct device *dev; } ;
1 void * __builtin_memcpy(void *, const void *, unsigned long);
1 unsigned long int __builtin_object_size(void *, int);
1 long int __builtin_expect(long exp, long c);
218 void __read_once_size(const volatile void *p, void *res, int size);
243 void __write_once_size(volatile void *p, void *res, int size);
308 bool constant_test_bit(long nr, const volatile unsigned long *addr);
7 __u32 __arch_swab32(__u32 val);
46 __u16 __fswab16(__u16 val);
55 __u32 __fswab32(__u32 val);
275 void __pr_err(const char *, ...);
276 void __pr_warn(const char *, ...);
55 void __dynamic_pr_debug(struct _ddebug *, const char *, ...);
254 void __might_fault(const char *, int);
3 bool ldv_is_err(const void *ptr);
5 void * ldv_err_ptr(long error);
6 long int ldv_ptr_err(const void *ptr);
8 void ldv_spin_lock();
9 void ldv_spin_unlock();
26 void * ldv_undef_ptr();
25 void INIT_LIST_HEAD(struct list_head *list);
48 void __list_add(struct list_head *, struct list_head *, struct list_head *);
61 void list_add(struct list_head *new, struct list_head *head);
75 void list_add_tail(struct list_head *new, struct list_head *head);
113 void list_del(struct list_head *);
187 int list_empty(const struct list_head *head);
87 void __bad_percpu_size();
71 void warn_slowpath_null(const char *, const int);
10 extern struct task_struct *current_task;
12 struct task_struct * get_current();
16 void __xadd_wrong_size();
24 int atomic_read(const atomic_t *v);
36 void atomic_set(atomic_t *v, int i);
78 bool atomic_sub_and_test(int i, atomic_t *v);
154 int atomic_add_return(int i, atomic_t *v);
23 void * ERR_PTR(long error);
32 long int PTR_ERR(const void *ptr);
41 bool IS_ERR(const void *ptr);
8 extern int __preempt_count;
72 void __preempt_count_sub(int val);
281 void lockdep_init_map(struct lockdep_map *, const char *, struct lock_class_key *, int);
93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *);
22 void _raw_spin_lock(raw_spinlock_t *);
30 void _raw_spin_lock_bh(raw_spinlock_t *);
41 void _raw_spin_unlock(raw_spinlock_t *);
42 void _raw_spin_unlock_bh(raw_spinlock_t *);
45 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long);
289 raw_spinlock_t * spinlock_check(spinlock_t *lock);
300 void ldv_spin_lock_5(spinlock_t *lock);
300 void spin_lock(spinlock_t *lock);
309 void ldv_spin_lock_bh_6(spinlock_t *lock);
309 void spin_lock_bh(spinlock_t *lock);
349 void ldv_spin_unlock_9(spinlock_t *lock);
349 void spin_unlock(spinlock_t *lock);
358 void ldv_spin_unlock_bh_10(spinlock_t *lock);
358 void spin_unlock_bh(spinlock_t *lock);
376 void ldv_spin_unlock_irqrestore_12(spinlock_t *lock, unsigned long flags);
376 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
72 void __init_waitqueue_head(wait_queue_head_t *, const char *, struct lock_class_key *);
119 void __mutex_init(struct mutex *, const char *, struct lock_class_key *);
138 void mutex_lock_nested(struct mutex *, unsigned int);
174 void mutex_unlock(struct mutex *);
112 void down_read(struct rw_semaphore *);
122 void down_write(struct rw_semaphore *);
133 void up_read(struct rw_semaphore *);
138 void up_write(struct rw_semaphore *);
73 void init_completion(struct completion *x);
99 long int wait_for_completion_interruptible_timeout(struct completion *, unsigned long);
103 bool try_wait_for_completion(struct completion *);
106 void complete(struct completion *);
107 void complete_all(struct completion *);
292 unsigned long int __msecs_to_jiffies(const unsigned int);
354 unsigned long int msecs_to_jiffies(const unsigned int m);
181 void __init_work(struct work_struct *, int);
362 struct workqueue_struct * __alloc_workqueue_key(const char *, unsigned int, int, struct lock_class_key *, const char *, ...);
422 void destroy_workqueue(struct workqueue_struct *);
430 bool queue_work_on(int, struct workqueue_struct *, struct work_struct *);
437 void flush_workqueue(struct workqueue_struct *);
470 bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
79 void * idr_find_slowpath(struct idr *, int);
80 void idr_preload(gfp_t );
82 int idr_alloc_cyclic(struct idr *, void *, int, int, gfp_t );
85 void * idr_get_next(struct idr *, int *);
87 void idr_remove(struct idr *, int);
88 void idr_destroy(struct idr *);
98 void idr_preload_end();
115 void * idr_find(struct idr *idr, int id);
31 void kref_init(struct kref *kref);
40 void kref_get(struct kref *kref);
67 int kref_sub(struct kref *kref, unsigned int count, void (*release)(struct kref *));
96 int kref_put(struct kref *kref, void (*release)(struct kref *));
87 const char * kobject_name(const struct kobject *kobj);
2054 pid_t task_pid_nr(struct task_struct *tsk);
154 void kfree(const void *);
322 void * ldv_kmem_cache_alloc_20(struct kmem_cache *ldv_func_arg1, gfp_t flags);
466 void * kmalloc(size_t size, gfp_t flags);
579 void * kcalloc(size_t n, size_t size, gfp_t flags);
18 void ldv_check_alloc_flags(gfp_t flags);
2448 void unregister_chrdev_region(dev_t , unsigned int);
352 void subsys_interface_unregister(struct subsys_interface *);
512 void class_interface_unregister(struct class_interface *);
517 void class_destroy(struct class *);
865 const char * dev_name(const struct device *dev);
1004 void device_unregister(struct device *);
85 extern struct bus_type rio_bus_type;
86 extern struct class rio_mport_class;
302 int rio_mport_is_running(struct rio_mport *mport);
300 int rio_request_outb_mbox(struct rio_mport *, void *, int, int, void (*)(struct rio_mport *, void *, int, int));
302 int rio_release_outb_mbox(struct rio_mport *, int);
315 int rio_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, void *buffer, size_t len);
323 int rio_request_inb_mbox(struct rio_mport *, void *, int, int, void (*)(struct rio_mport *, void *, int, int));
325 int rio_release_inb_mbox(struct rio_mport *, int);
336 int rio_add_inb_buffer(struct rio_mport *mport, int mbox, void *buffer);
349 void * rio_get_inb_message(struct rio_mport *mport, int mbox);
418 const char * rio_name(struct rio_dev *rdev);
5 void kasan_check_read(const void *, unsigned int);
6 void kasan_check_write(const void *, unsigned int);
695 unsigned long int _copy_from_user(void *, const void *, unsigned int);
697 unsigned long int _copy_to_user(void *, const void *, unsigned int);
717 void __copy_from_user_overflow();
722 void __copy_to_user_overflow();
738 unsigned long int copy_from_user(void *to, const void *from, unsigned long n);
775 unsigned long int copy_to_user(void *to, const void *from, unsigned long n);
29 void cdev_del(struct cdev *);
39 int unregister_reboot_notifier(struct notifier_block *);
79 int cmbox = 1;
83 int chstart = 256;
89 unsigned int dbg_level = 0U;
234 struct rio_channel * riocm_ch_alloc(u16 ch_num);
235 void riocm_ch_free(struct kref *ref);
236 int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev, void *buffer, size_t len);
238 int riocm_ch_close(struct rio_channel *ch);
240 struct spinlock idr_lock = { { { { { 0 } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "idr_lock", 0, 0UL } } } };
241 struct idr ch_idr = { 0, 0, 0, 0, { { { { { 0 } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "ch_idr.lock", 0, 0UL } } } }, 0, 0 };
243 struct list_head cm_dev_list = { &cm_dev_list, &cm_dev_list };
244 struct rw_semaphore rdev_sem = { { 0L }, { &(rdev_sem.wait_list), &(rdev_sem.wait_list) }, { { { 0 } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "rdev_sem.wait_lock", 0, 0UL } }, { { 0 } }, (struct task_struct *)0, { 0, { 0, 0 }, "rdev_sem", 0, 0UL } };
246 struct class *dev_class = 0;
249 unsigned int dev_number = 0U;
250 struct channel_dev riocm_cdev = { };
258 int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp);
268 int riocm_cmp_exch(struct rio_channel *ch, enum rio_cm_state cmp, enum rio_cm_state exch);
281 enum rio_cm_state riocm_exch(struct rio_channel *ch, enum rio_cm_state exch);
293 struct rio_channel * riocm_get_channel(u16 nr);
305 void riocm_put_channel(struct rio_channel *ch);
310 void * riocm_rx_get_msg(struct cm_dev *cm);
339 void riocm_rx_fill(struct cm_dev *cm, int nent);
364 void riocm_rx_free(struct cm_dev *cm);
386 int riocm_req_handler(struct cm_dev *cm, void *req_data);
433 int riocm_resp_handler(void *resp_data);
465 int riocm_close_handler(void *data);
496 void rio_cm_handler(struct cm_dev *cm, void *data);
536 int rio_rx_data_handler(struct cm_dev *cm, void *buf);
590 void rio_ibmsg_handler(struct work_struct *work);
627 void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot);
645 void rio_txcq_handler(struct cm_dev *cm, int slot);
704 void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot);
713 int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev, void *buffer, size_t len);
791 int riocm_ch_send(u16 ch_id, void *buf, int len);
841 int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf);
876 int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout);
952 int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm, struct cm_peer *peer, u16 rem_ch);
1031 int riocm_send_ack(struct rio_channel *ch);
1081 struct rio_channel * riocm_ch_accept(u16 ch_id, u16 *new_ch_id, long timeout);
1209 int riocm_ch_listen(u16 ch_id);
1234 int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context);
1350 struct rio_channel * riocm_ch_create(u16 *ch_num);
1397 int riocm_send_close(struct rio_channel *ch);
1486 int riocm_cdev_open(struct inode *inode, struct file *filp);
1500 int riocm_cdev_release(struct inode *inode, struct file *filp);
1535 int cm_ep_get_list_size(void *arg);
1566 int cm_ep_get_list(void *arg);
1622 int cm_mport_get_list(void *arg);
1662 int cm_chan_create(struct file *filp, void *arg);
1688 int cm_chan_close(struct file *filp, void *arg);
1720 int cm_chan_bind(void *arg);
1736 int cm_chan_listen(void *arg);
1752 int cm_chan_accept(struct file *filp, void *arg);
1784 int cm_chan_connect(void *arg);
1839 int cm_chan_msg_send(void *arg);
1869 int cm_chan_msg_rcv(void *arg);
1908 long int riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
1955 int riocm_add_dev(struct device *dev, struct subsys_interface *sif);
1999 void riocm_remove_dev(struct device *dev, struct subsys_interface *sif);
2106 int riocm_add_mport(struct device *dev, struct class_interface *class_intf);
2179 void riocm_remove_mport(struct device *dev, struct class_interface *class_intf);
2244 int rio_cm_shutdown(struct notifier_block *nb, unsigned long code, void *unused);
2266 struct subsys_interface riocm_interface = { "rio_cm", &rio_bus_type, { 0, 0 }, &riocm_add_dev, &riocm_remove_dev };
2276 struct class_interface rio_mport_interface = { { 0, 0 }, &rio_mport_class, &riocm_add_mport, &riocm_remove_mport };
2282 struct notifier_block rio_cm_notifier = { &rio_cm_shutdown, 0, 0 };
2351 void riocm_exit();
2385 void ldv_check_final_state();
2388 void ldv_check_return_value(int);
2394 void ldv_initialize();
2397 void ldv_handler_precall();
2400 int nondet_int();
2403 int LDV_IN_INTERRUPT = 0;
2406 void ldv_main0_sequence_infinite_withcheck_stateful();
10 void ldv_error();
25 int ldv_undef_int();
28 bool ldv_is_err_or_null(const void *ptr);
20 int ldv_spin = 0;
30 struct page * ldv_some_page();
33 struct page * ldv_check_alloc_flags_and_return_some_page(gfp_t flags);
42 void ldv_check_alloc_nonatomic();
63 int ldv_spin_trylock();
return;
}
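The entry-point trace below is driven by the verifier's environment model. A compact reconstruction of the allocation-context model follows, inferred from the ldv_* function bodies that appear in the trace itself (it is not copied from the verifier sources, and mapping the raw masks 34078752U and 33554432U to GFP_ATOMIC and GFP_NOWAIT is an assumption about this kernel configuration):

/* Reconstruction of the LDV "no sleeping allocation under spinlock"
 * model, inferred from the function bodies shown in the trace below.
 */
#define LDV_GFP_ATOMIC 34078752U   /* assumed: GFP_ATOMIC in this config */
#define LDV_GFP_NOWAIT 33554432U   /* assumed: GFP_NOWAIT in this config */

static int ldv_spin;               /* nonzero while a spinlock is held */

static void ldv_error(void)
{
        /* LDV_ERROR: the property is violated on this path */
}

/* Called by the spin_lock_bh() wrapper before taking the real lock. */
static void ldv_spin_lock(void)
{
        ldv_spin = 1;
}

/* Called by the kmalloc() wrapper: while a spinlock is held, only
 * non-sleeping allocations (GFP_ATOMIC/GFP_NOWAIT) are permitted. */
static void ldv_check_alloc_flags(unsigned int flags)
{
        if (ldv_spin != 0 && flags != LDV_GFP_ATOMIC &&
            flags != LDV_GFP_NOWAIT)
                ldv_error();
}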
-entry_point
{
2408 struct inode *var_group1;
2409 struct file *var_group2;
2410 int res_riocm_cdev_open_32;
2411 unsigned int var_riocm_cdev_ioctl_45_p1;
2412 unsigned long var_riocm_cdev_ioctl_45_p2;
2413 struct device *var_group3;
2414 struct subsys_interface *var_group4;
2415 struct notifier_block *var_group5;
2416 unsigned long var_rio_cm_shutdown_51_p1;
2417 void *var_rio_cm_shutdown_51_p2;
2418 int ldv_s_riocm_cdev_fops_file_operations;
2419 int tmp;
2420 int tmp___0;
2682 ldv_s_riocm_cdev_fops_file_operations = 0;
2672 LDV_IN_INTERRUPT = 1;
2681 ldv_initialize() { /* Function call is skipped because the function is undefined */ }
2690 goto ldv_35569;
2690 tmp___0 = nondet_int() { /* Function call is skipped because the function is undefined */ }
2690 assume(tmp___0 != 0);
2693 goto ldv_35568;
2691 ldv_35568:;
2694 tmp = nondet_int() { /* Function call is skipped because the function is undefined */ }
2694 switch (tmp);
2695 assume(!(tmp == 0));
2752 assume(!(tmp == 1));
2805 assume(!(tmp == 2));
2858 assume(!(tmp == 3));
2911 assume(!(tmp == 4));
2964 assume(tmp == 5);
3008 ldv_handler_precall() { /* Function call is skipped because the function is undefined */ }
3009 -rio_cm_shutdown(var_group5, var_rio_cm_shutdown_51_p1, var_rio_cm_shutdown_51_p2)
{
2246 struct rio_channel *ch;
2247 unsigned int i;
2248 struct _ddebug descriptor;
2249 long tmp;
2250 struct _ddebug descriptor___0;
2251 long tmp___0;
2252 void *tmp___1;
2250 assume(!((dbg_level & 2U) != 0U));
2252 -spin_lock_bh(&idr_lock)
{
58 -ldv_spin_lock()
{
52 ldv_spin = 1;
53 return;
}
60 -ldv_spin_lock_bh_6(lock)
{
311 _raw_spin_lock_bh(&(lock->__annonCompField20.rlock)) { /* Function call is skipped because the function is undefined */ }
312 return;
}
61 return;
}
2253 i = 0U;
2253 goto ldv_35501;
2253 tmp___1 = idr_get_next(&ch_idr, (int *)(&i)) { /* Function call is skipped because the function is undefined */ }
2253 ch = (struct rio_channel *)tmp___1;
2253 assume(((unsigned long)ch) != ((unsigned long)((struct rio_channel *)0)));
2255 goto ldv_35500;
2254 ldv_35500:;
2254 assume(!((dbg_level & 2U) != 0U));
2255 unsigned int __CPAchecker_TMP_1 = (unsigned int)(ch->state);
2255 assume(__CPAchecker_TMP_1 == 2U);
2256 -riocm_send_close(ch)
{
1399 struct rio_ch_chan_hdr *hdr;
1400 int ret;
1401 void *tmp;
1402 unsigned int tmp___0;
1403 unsigned int tmp___1;
1404 unsigned short tmp___2;
1405 unsigned short tmp___3;
1406 int tmp___4;
1406 -kmalloc(20UL, 37748928U)
{
145 void *tmp;
146 -ldv_check_alloc_flags(flags)
{
27 assume(ldv_spin != 0);
27 assume(flags != 34078752U);
27 assume(flags != 33554432U);
27 -ldv_error()
{
15 LDV_ERROR:;
}
}
}
}
}
}
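The path above is a sleeping allocation in atomic context: rio_cm_shutdown() takes spin_lock_bh(&idr_lock) and, while walking the channel IDR, calls riocm_send_close(), which allocates the CLOSE header with kzalloc(sizeof(*hdr), GFP_KERNEL). In the trace this appears as kmalloc(20UL, 37748928U): 20 is the size of the packed struct rio_ch_chan_hdr and 37748928U is the numeric value of GFP_KERNEL. Because ldv_spin is still set when the allocation is checked, ldv_check_alloc_flags() reaches LDV_ERROR. Below is a minimal sketch of one possible fix, reusing the driver's own identifiers (an assumption about a suitable resolution, not necessarily the exact upstream patch): collect the affected channels on a private list under the lock and send the CLOSE messages only after the lock is dropped.

static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
                           void *unused)
{
        struct rio_channel *ch;
        unsigned int i;
        LIST_HEAD(list);

        riocm_debug(EXIT, ".");

        /* Collect connected channels under idr_lock without allocating. */
        spin_lock_bh(&idr_lock);
        idr_for_each_entry(&ch_idr, ch, i) {
                if (ch->state == RIO_CM_CONNECTED) {
                        riocm_debug(EXIT, "close ch %d", ch->id);
                        idr_remove(&ch_idr, ch->id);
                        list_add(&ch->ch_node, &list);
                }
        }
        spin_unlock_bh(&idr_lock);

        /* No spinlock is held here, so the GFP_KERNEL allocation inside
         * riocm_send_close() may legally sleep. */
        list_for_each_entry(ch, &list, ch_node)
                riocm_send_close(ch);

        return NOTIFY_DONE;
}

A smaller alternative would be to pass GFP_ATOMIC to the allocation in riocm_send_close(), at the cost of drawing on the atomic reserves and of a possible allocation failure under lock; deferring the send as sketched avoids both.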
Source code
1 2 /* 3 * rio_cm - RapidIO Channelized Messaging Driver 4 * 5 * Copyright 2013-2016 Integrated Device Technology, Inc. 6 * Copyright (c) 2015, Prodrive Technologies 7 * Copyright (c) 2015, RapidIO Trade Association 8 * 9 * This program is free software; you can redistribute it and/or modify it 10 * under the terms of the GNU General Public License as published by the 11 * Free Software Foundation; either version 2 of the License, or (at your 12 * option) any later version. 13 * 14 * THIS PROGRAM IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL, 15 * BUT WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED WARRANTY OF 16 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE 17 * GNU GENERAL PUBLIC LICENSE FOR MORE DETAILS. 18 */ 19 20 #include <linux/module.h> 21 #include <linux/kernel.h> 22 #include <linux/dma-mapping.h> 23 #include <linux/delay.h> 24 #include <linux/sched.h> 25 #include <linux/rio.h> 26 #include <linux/rio_drv.h> 27 #include <linux/slab.h> 28 #include <linux/idr.h> 29 #include <linux/interrupt.h> 30 #include <linux/cdev.h> 31 #include <linux/fs.h> 32 #include <linux/poll.h> 33 #include <linux/reboot.h> 34 #include <linux/bitops.h> 35 #include <linux/printk.h> 36 #include <linux/rio_cm_cdev.h> 37 38 #define DRV_NAME "rio_cm" 39 #define DRV_VERSION "1.0.0" 40 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 41 #define DRV_DESC "RapidIO Channelized Messaging Driver" 42 #define DEV_NAME "rio_cm" 43 44 /* Debug output filtering masks */ 45 enum { 46 DBG_NONE = 0, 47 DBG_INIT = BIT(0), /* driver init */ 48 DBG_EXIT = BIT(1), /* driver exit */ 49 DBG_MPORT = BIT(2), /* mport add/remove */ 50 DBG_RDEV = BIT(3), /* RapidIO device add/remove */ 51 DBG_CHOP = BIT(4), /* channel operations */ 52 DBG_WAIT = BIT(5), /* waiting for events */ 53 DBG_TX = BIT(6), /* message TX */ 54 DBG_TX_EVENT = BIT(7), /* message TX event */ 55 DBG_RX_DATA = BIT(8), /* inbound data messages */ 56 DBG_RX_CMD = BIT(9), /* inbound REQ/ACK/NACK messages */ 57 DBG_ALL = ~0, 58 }; 59 60 #ifdef DEBUG 61 #define riocm_debug(level, fmt, arg...) \ 62 do { \ 63 if (DBG_##level & dbg_level) \ 64 pr_debug(DRV_NAME ": %s " fmt "\n", \ 65 __func__, ##arg); \ 66 } while (0) 67 #else 68 #define riocm_debug(level, fmt, arg...) \ 69 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 70 #endif 71 72 #define riocm_warn(fmt, arg...) \ 73 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 74 75 #define riocm_error(fmt, arg...) 
\ 76 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 77 78 79 static int cmbox = 1; 80 module_param(cmbox, int, S_IRUGO); 81 MODULE_PARM_DESC(cmbox, "RapidIO Mailbox number (default 1)"); 82 83 static int chstart = 256; 84 module_param(chstart, int, S_IRUGO); 85 MODULE_PARM_DESC(chstart, 86 "Start channel number for dynamic allocation (default 256)"); 87 88 #ifdef DEBUG 89 static u32 dbg_level = DBG_NONE; 90 module_param(dbg_level, uint, S_IWUSR | S_IRUGO); 91 MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); 92 #endif 93 94 MODULE_AUTHOR(DRV_AUTHOR); 95 MODULE_DESCRIPTION(DRV_DESC); 96 MODULE_LICENSE("GPL"); 97 MODULE_VERSION(DRV_VERSION); 98 99 #define RIOCM_TX_RING_SIZE 128 100 #define RIOCM_RX_RING_SIZE 128 101 #define RIOCM_CONNECT_TO 3 /* connect response TO (in sec) */ 102 103 #define RIOCM_MAX_CHNUM 0xffff /* Use full range of u16 field */ 104 #define RIOCM_CHNUM_AUTO 0 105 #define RIOCM_MAX_EP_COUNT 0x10000 /* Max number of endpoints */ 106 107 enum rio_cm_state { 108 RIO_CM_IDLE, 109 RIO_CM_CONNECT, 110 RIO_CM_CONNECTED, 111 RIO_CM_DISCONNECT, 112 RIO_CM_CHAN_BOUND, 113 RIO_CM_LISTEN, 114 RIO_CM_DESTROYING, 115 }; 116 117 enum rio_cm_pkt_type { 118 RIO_CM_SYS = 0xaa, 119 RIO_CM_CHAN = 0x55, 120 }; 121 122 enum rio_cm_chop { 123 CM_CONN_REQ, 124 CM_CONN_ACK, 125 CM_CONN_CLOSE, 126 CM_DATA_MSG, 127 }; 128 129 struct rio_ch_base_bhdr { 130 u32 src_id; 131 u32 dst_id; 132 #define RIO_HDR_LETTER_MASK 0xffff0000 133 #define RIO_HDR_MBOX_MASK 0x0000ffff 134 u8 src_mbox; 135 u8 dst_mbox; 136 u8 type; 137 } __attribute__((__packed__)); 138 139 struct rio_ch_chan_hdr { 140 struct rio_ch_base_bhdr bhdr; 141 u8 ch_op; 142 u16 dst_ch; 143 u16 src_ch; 144 u16 msg_len; 145 u16 rsrvd; 146 } __attribute__((__packed__)); 147 148 struct tx_req { 149 struct list_head node; 150 struct rio_dev *rdev; 151 void *buffer; 152 size_t len; 153 }; 154 155 struct cm_dev { 156 struct list_head list; 157 struct rio_mport *mport; 158 void *rx_buf[RIOCM_RX_RING_SIZE]; 159 int rx_slots; 160 struct mutex rx_lock; 161 162 void *tx_buf[RIOCM_TX_RING_SIZE]; 163 int tx_slot; 164 int tx_cnt; 165 int tx_ack_slot; 166 struct list_head tx_reqs; 167 spinlock_t tx_lock; 168 169 struct list_head peers; 170 u32 npeers; 171 struct workqueue_struct *rx_wq; 172 struct work_struct rx_work; 173 }; 174 175 struct chan_rx_ring { 176 void *buf[RIOCM_RX_RING_SIZE]; 177 int head; 178 int tail; 179 int count; 180 181 /* Tracking RX buffers reported to upper level */ 182 void *inuse[RIOCM_RX_RING_SIZE]; 183 int inuse_cnt; 184 }; 185 186 struct rio_channel { 187 u16 id; /* local channel ID */ 188 struct kref ref; /* channel refcount */ 189 struct file *filp; 190 struct cm_dev *cmdev; /* associated CM device object */ 191 struct rio_dev *rdev; /* remote RapidIO device */ 192 enum rio_cm_state state; 193 int error; 194 spinlock_t lock; 195 void *context; 196 u32 loc_destid; /* local destID */ 197 u32 rem_destid; /* remote destID */ 198 u16 rem_channel; /* remote channel ID */ 199 struct list_head accept_queue; 200 struct list_head ch_node; 201 struct completion comp; 202 struct completion comp_close; 203 struct chan_rx_ring rx_ring; 204 }; 205 206 struct cm_peer { 207 struct list_head node; 208 struct rio_dev *rdev; 209 }; 210 211 struct rio_cm_work { 212 struct work_struct work; 213 struct cm_dev *cm; 214 void *data; 215 }; 216 217 struct conn_req { 218 struct list_head node; 219 u32 destid; /* requester destID */ 220 u16 chan; /* requester channel ID */ 221 struct cm_dev *cmdev; 222 }; 223 224 /* 225 * A 
channel_dev structure represents a CM_CDEV 226 * @cdev Character device 227 * @dev Associated device object 228 */ 229 struct channel_dev { 230 struct cdev cdev; 231 struct device *dev; 232 }; 233 234 static struct rio_channel *riocm_ch_alloc(u16 ch_num); 235 static void riocm_ch_free(struct kref *ref); 236 static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev, 237 void *buffer, size_t len); 238 static int riocm_ch_close(struct rio_channel *ch); 239 240 static DEFINE_SPINLOCK(idr_lock); 241 static DEFINE_IDR(ch_idr); 242 243 static LIST_HEAD(cm_dev_list); 244 static DECLARE_RWSEM(rdev_sem); 245 246 static struct class *dev_class; 247 static unsigned int dev_major; 248 static unsigned int dev_minor_base; 249 static dev_t dev_number; 250 static struct channel_dev riocm_cdev; 251 252 #define is_msg_capable(src_ops, dst_ops) \ 253 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 254 (dst_ops & RIO_DST_OPS_DATA_MSG)) 255 #define dev_cm_capable(dev) \ 256 is_msg_capable(dev->src_ops, dev->dst_ops) 257 258 static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp) 259 { 260 int ret; 261 262 spin_lock_bh(&ch->lock); 263 ret = (ch->state == cmp); 264 spin_unlock_bh(&ch->lock); 265 return ret; 266 } 267 268 static int riocm_cmp_exch(struct rio_channel *ch, 269 enum rio_cm_state cmp, enum rio_cm_state exch) 270 { 271 int ret; 272 273 spin_lock_bh(&ch->lock); 274 ret = (ch->state == cmp); 275 if (ret) 276 ch->state = exch; 277 spin_unlock_bh(&ch->lock); 278 return ret; 279 } 280 281 static enum rio_cm_state riocm_exch(struct rio_channel *ch, 282 enum rio_cm_state exch) 283 { 284 enum rio_cm_state old; 285 286 spin_lock_bh(&ch->lock); 287 old = ch->state; 288 ch->state = exch; 289 spin_unlock_bh(&ch->lock); 290 return old; 291 } 292 293 static struct rio_channel *riocm_get_channel(u16 nr) 294 { 295 struct rio_channel *ch; 296 297 spin_lock_bh(&idr_lock); 298 ch = idr_find(&ch_idr, nr); 299 if (ch) 300 kref_get(&ch->ref); 301 spin_unlock_bh(&idr_lock); 302 return ch; 303 } 304 305 static void riocm_put_channel(struct rio_channel *ch) 306 { 307 kref_put(&ch->ref, riocm_ch_free); 308 } 309 310 static void *riocm_rx_get_msg(struct cm_dev *cm) 311 { 312 void *msg; 313 int i; 314 315 msg = rio_get_inb_message(cm->mport, cmbox); 316 if (msg) { 317 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { 318 if (cm->rx_buf[i] == msg) { 319 cm->rx_buf[i] = NULL; 320 cm->rx_slots++; 321 break; 322 } 323 } 324 325 if (i == RIOCM_RX_RING_SIZE) 326 riocm_warn("no record for buffer 0x%p", msg); 327 } 328 329 return msg; 330 } 331 332 /* 333 * riocm_rx_fill - fills a ring of receive buffers for given cm device 334 * @cm: cm_dev object 335 * @nent: max number of entries to fill 336 * 337 * Returns: none 338 */ 339 static void riocm_rx_fill(struct cm_dev *cm, int nent) 340 { 341 int i; 342 343 if (cm->rx_slots == 0) 344 return; 345 346 for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) { 347 if (cm->rx_buf[i] == NULL) { 348 cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL); 349 if (cm->rx_buf[i] == NULL) 350 break; 351 rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]); 352 cm->rx_slots--; 353 nent--; 354 } 355 } 356 } 357 358 /* 359 * riocm_rx_free - frees all receive buffers associated with given cm device 360 * @cm: cm_dev object 361 * 362 * Returns: none 363 */ 364 static void riocm_rx_free(struct cm_dev *cm) 365 { 366 int i; 367 368 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { 369 if (cm->rx_buf[i] != NULL) { 370 kfree(cm->rx_buf[i]); 371 cm->rx_buf[i] = NULL; 372 } 373 } 374 } 375 376 /* 377 * 
riocm_req_handler - connection request handler 378 * @cm: cm_dev object 379 * @req_data: pointer to the request packet 380 * 381 * Returns: 0 if success, or 382 * -EINVAL if channel is not in correct state, 383 * -ENODEV if cannot find a channel with specified ID, 384 * -ENOMEM if unable to allocate memory to store the request 385 */ 386 static int riocm_req_handler(struct cm_dev *cm, void *req_data) 387 { 388 struct rio_channel *ch; 389 struct conn_req *req; 390 struct rio_ch_chan_hdr *hh = req_data; 391 u16 chnum; 392 393 chnum = ntohs(hh->dst_ch); 394 395 ch = riocm_get_channel(chnum); 396 397 if (!ch) 398 return -ENODEV; 399 400 if (ch->state != RIO_CM_LISTEN) { 401 riocm_debug(RX_CMD, "channel %d is not in listen state", chnum); 402 riocm_put_channel(ch); 403 return -EINVAL; 404 } 405 406 req = kzalloc(sizeof(*req), GFP_KERNEL); 407 if (!req) { 408 riocm_put_channel(ch); 409 return -ENOMEM; 410 } 411 412 req->destid = ntohl(hh->bhdr.src_id); 413 req->chan = ntohs(hh->src_ch); 414 req->cmdev = cm; 415 416 spin_lock_bh(&ch->lock); 417 list_add_tail(&req->node, &ch->accept_queue); 418 spin_unlock_bh(&ch->lock); 419 complete(&ch->comp); 420 riocm_put_channel(ch); 421 422 return 0; 423 } 424 425 /* 426 * riocm_resp_handler - response to connection request handler 427 * @resp_data: pointer to the response packet 428 * 429 * Returns: 0 if success, or 430 * -EINVAL if channel is not in correct state, 431 * -ENODEV if cannot find a channel with specified ID, 432 */ 433 static int riocm_resp_handler(void *resp_data) 434 { 435 struct rio_channel *ch; 436 struct rio_ch_chan_hdr *hh = resp_data; 437 u16 chnum; 438 439 chnum = ntohs(hh->dst_ch); 440 ch = riocm_get_channel(chnum); 441 if (!ch) 442 return -ENODEV; 443 444 if (ch->state != RIO_CM_CONNECT) { 445 riocm_put_channel(ch); 446 return -EINVAL; 447 } 448 449 riocm_exch(ch, RIO_CM_CONNECTED); 450 ch->rem_channel = ntohs(hh->src_ch); 451 complete(&ch->comp); 452 riocm_put_channel(ch); 453 454 return 0; 455 } 456 457 /* 458 * riocm_close_handler - channel close request handler 459 * @req_data: pointer to the request packet 460 * 461 * Returns: 0 if success, or 462 * -ENODEV if cannot find a channel with specified ID, 463 * + error codes returned by riocm_ch_close. 
464 */ 465 static int riocm_close_handler(void *data) 466 { 467 struct rio_channel *ch; 468 struct rio_ch_chan_hdr *hh = data; 469 int ret; 470 471 riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch)); 472 473 spin_lock_bh(&idr_lock); 474 ch = idr_find(&ch_idr, ntohs(hh->dst_ch)); 475 if (!ch) { 476 spin_unlock_bh(&idr_lock); 477 return -ENODEV; 478 } 479 idr_remove(&ch_idr, ch->id); 480 spin_unlock_bh(&idr_lock); 481 482 riocm_exch(ch, RIO_CM_DISCONNECT); 483 484 ret = riocm_ch_close(ch); 485 if (ret) 486 riocm_debug(RX_CMD, "riocm_ch_close() returned %d", ret); 487 488 return 0; 489 } 490 491 /* 492 * rio_cm_handler - function that services request (non-data) packets 493 * @cm: cm_dev object 494 * @data: pointer to the packet 495 */ 496 static void rio_cm_handler(struct cm_dev *cm, void *data) 497 { 498 struct rio_ch_chan_hdr *hdr; 499 500 if (!rio_mport_is_running(cm->mport)) 501 goto out; 502 503 hdr = data; 504 505 riocm_debug(RX_CMD, "OP=%x for ch=%d from %d", 506 hdr->ch_op, ntohs(hdr->dst_ch), ntohs(hdr->src_ch)); 507 508 switch (hdr->ch_op) { 509 case CM_CONN_REQ: 510 riocm_req_handler(cm, data); 511 break; 512 case CM_CONN_ACK: 513 riocm_resp_handler(data); 514 break; 515 case CM_CONN_CLOSE: 516 riocm_close_handler(data); 517 break; 518 default: 519 riocm_error("Invalid packet header"); 520 break; 521 } 522 out: 523 kfree(data); 524 } 525 526 /* 527 * rio_rx_data_handler - received data packet handler 528 * @cm: cm_dev object 529 * @buf: data packet 530 * 531 * Returns: 0 if success, or 532 * -ENODEV if cannot find a channel with specified ID, 533 * -EIO if channel is not in CONNECTED state, 534 * -ENOMEM if channel RX queue is full (packet discarded) 535 */ 536 static int rio_rx_data_handler(struct cm_dev *cm, void *buf) 537 { 538 struct rio_ch_chan_hdr *hdr; 539 struct rio_channel *ch; 540 541 hdr = buf; 542 543 riocm_debug(RX_DATA, "for ch=%d", ntohs(hdr->dst_ch)); 544 545 ch = riocm_get_channel(ntohs(hdr->dst_ch)); 546 if (!ch) { 547 /* Discard data message for non-existing channel */ 548 kfree(buf); 549 return -ENODEV; 550 } 551 552 /* Place pointer to the buffer into channel's RX queue */ 553 spin_lock(&ch->lock); 554 555 if (ch->state != RIO_CM_CONNECTED) { 556 /* Channel is not ready to receive data, discard a packet */ 557 riocm_debug(RX_DATA, "ch=%d is in wrong state=%d", 558 ch->id, ch->state); 559 spin_unlock(&ch->lock); 560 kfree(buf); 561 riocm_put_channel(ch); 562 return -EIO; 563 } 564 565 if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) { 566 /* If RX ring is full, discard a packet */ 567 riocm_debug(RX_DATA, "ch=%d is full", ch->id); 568 spin_unlock(&ch->lock); 569 kfree(buf); 570 riocm_put_channel(ch); 571 return -ENOMEM; 572 } 573 574 ch->rx_ring.buf[ch->rx_ring.head] = buf; 575 ch->rx_ring.head++; 576 ch->rx_ring.count++; 577 ch->rx_ring.head %= RIOCM_RX_RING_SIZE; 578 579 complete(&ch->comp); 580 581 spin_unlock(&ch->lock); 582 riocm_put_channel(ch); 583 584 return 0; 585 } 586 587 /* 588 * rio_ibmsg_handler - inbound message packet handler 589 */ 590 static void rio_ibmsg_handler(struct work_struct *work) 591 { 592 struct cm_dev *cm = container_of(work, struct cm_dev, rx_work); 593 void *data; 594 struct rio_ch_chan_hdr *hdr; 595 596 if (!rio_mport_is_running(cm->mport)) 597 return; 598 599 while (1) { 600 mutex_lock(&cm->rx_lock); 601 data = riocm_rx_get_msg(cm); 602 if (data) 603 riocm_rx_fill(cm, 1); 604 mutex_unlock(&cm->rx_lock); 605 606 if (data == NULL) 607 break; 608 609 hdr = data; 610 611 if (hdr->bhdr.type != RIO_CM_CHAN) { 612 /* For now simply 
discard packets other than channel */ 613 riocm_error("Unsupported TYPE code (0x%x). Msg dropped", 614 hdr->bhdr.type); 615 kfree(data); 616 continue; 617 } 618 619 /* Process a channel message */ 620 if (hdr->ch_op == CM_DATA_MSG) 621 rio_rx_data_handler(cm, data); 622 else 623 rio_cm_handler(cm, data); 624 } 625 } 626 627 static void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id, 628 int mbox, int slot) 629 { 630 struct cm_dev *cm = dev_id; 631 632 if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work)) 633 queue_work(cm->rx_wq, &cm->rx_work); 634 } 635 636 /* 637 * rio_txcq_handler - TX completion handler 638 * @cm: cm_dev object 639 * @slot: TX queue slot 640 * 641 * TX completion handler also ensures that pending request packets are placed 642 * into transmit queue as soon as a free slot becomes available. This is done 643 * to give higher priority to request packets during high intensity data flow. 644 */ 645 static void rio_txcq_handler(struct cm_dev *cm, int slot) 646 { 647 int ack_slot; 648 649 /* ATTN: Add TX completion notification if/when direct buffer 650 * transfer is implemented. At this moment only correct tracking 651 * of tx_count is important. 652 */ 653 riocm_debug(TX_EVENT, "for mport_%d slot %d tx_cnt %d", 654 cm->mport->id, slot, cm->tx_cnt); 655 656 spin_lock(&cm->tx_lock); 657 ack_slot = cm->tx_ack_slot; 658 659 if (ack_slot == slot) 660 riocm_debug(TX_EVENT, "slot == ack_slot"); 661 662 while (cm->tx_cnt && ((ack_slot != slot) || 663 (cm->tx_cnt == RIOCM_TX_RING_SIZE))) { 664 665 cm->tx_buf[ack_slot] = NULL; 666 ++ack_slot; 667 ack_slot &= (RIOCM_TX_RING_SIZE - 1); 668 cm->tx_cnt--; 669 } 670 671 if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE) 672 riocm_error("tx_cnt %d out of sync", cm->tx_cnt); 673 674 WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE)); 675 676 cm->tx_ack_slot = ack_slot; 677 678 /* 679 * If there are pending requests, insert them into transmit queue 680 */ 681 if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) { 682 struct tx_req *req, *_req; 683 int rc; 684 685 list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) { 686 list_del(&req->node); 687 cm->tx_buf[cm->tx_slot] = req->buffer; 688 rc = rio_add_outb_message(cm->mport, req->rdev, cmbox, 689 req->buffer, req->len); 690 kfree(req->buffer); 691 kfree(req); 692 693 ++cm->tx_cnt; 694 ++cm->tx_slot; 695 cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1); 696 if (cm->tx_cnt == RIOCM_TX_RING_SIZE) 697 break; 698 } 699 } 700 701 spin_unlock(&cm->tx_lock); 702 } 703 704 static void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id, 705 int mbox, int slot) 706 { 707 struct cm_dev *cm = dev_id; 708 709 if (cm && rio_mport_is_running(cm->mport)) 710 rio_txcq_handler(cm, slot); 711 } 712 713 static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev, 714 void *buffer, size_t len) 715 { 716 unsigned long flags; 717 struct tx_req *treq; 718 719 treq = kzalloc(sizeof(*treq), GFP_KERNEL); 720 if (treq == NULL) 721 return -ENOMEM; 722 723 treq->rdev = rdev; 724 treq->buffer = buffer; 725 treq->len = len; 726 727 spin_lock_irqsave(&cm->tx_lock, flags); 728 list_add_tail(&treq->node, &cm->tx_reqs); 729 spin_unlock_irqrestore(&cm->tx_lock, flags); 730 return 0; 731 } 732 733 /* 734 * riocm_post_send - helper function that places packet into msg TX queue 735 * @cm: cm_dev object 736 * @rdev: target RapidIO device object (required by outbound msg interface) 737 * @buffer: pointer to a packet buffer to send 738 * @len: length of data to transfer 
739 * @req: request priority flag 740 * 741 * Returns: 0 if success, or error code otherwise. 742 */ 743 static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev, 744 void *buffer, size_t len) 745 { 746 int rc; 747 unsigned long flags; 748 749 spin_lock_irqsave(&cm->tx_lock, flags); 750 751 if (cm->mport == NULL) { 752 rc = -ENODEV; 753 goto err_out; 754 } 755 756 if (cm->tx_cnt == RIOCM_TX_RING_SIZE) { 757 riocm_debug(TX, "Tx Queue is full"); 758 rc = -EBUSY; 759 goto err_out; 760 } 761 762 cm->tx_buf[cm->tx_slot] = buffer; 763 rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len); 764 765 riocm_debug(TX, "Add buf@%p destid=%x tx_slot=%d tx_cnt=%d", 766 buffer, rdev->destid, cm->tx_slot, cm->tx_cnt); 767 768 ++cm->tx_cnt; 769 ++cm->tx_slot; 770 cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1); 771 772 err_out: 773 spin_unlock_irqrestore(&cm->tx_lock, flags); 774 return rc; 775 } 776 777 /* 778 * riocm_ch_send - sends a data packet to a remote device 779 * @ch_id: local channel ID 780 * @buf: pointer to a data buffer to send (including CM header) 781 * @len: length of data to transfer (including CM header) 782 * 783 * ATTN: ASSUMES THAT THE HEADER SPACE IS RESERVED PART OF THE DATA PACKET 784 * 785 * Returns: 0 if success, or 786 * -EINVAL if one or more input parameters is/are not valid, 787 * -ENODEV if cannot find a channel with specified ID, 788 * -EAGAIN if a channel is not in CONNECTED state, 789 * + error codes returned by HW send routine. 790 */ 791 static int riocm_ch_send(u16 ch_id, void *buf, int len) 792 { 793 struct rio_channel *ch; 794 struct rio_ch_chan_hdr *hdr; 795 int ret; 796 797 if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE) 798 return -EINVAL; 799 800 ch = riocm_get_channel(ch_id); 801 if (!ch) { 802 riocm_error("%s(%d) ch_%d not found", current->comm, 803 task_pid_nr(current), ch_id); 804 return -ENODEV; 805 } 806 807 if (!riocm_cmp(ch, RIO_CM_CONNECTED)) { 808 ret = -EAGAIN; 809 goto err_out; 810 } 811 812 /* 813 * Fill buffer header section with corresponding channel data 814 */ 815 hdr = buf; 816 817 hdr->bhdr.src_id = htonl(ch->loc_destid); 818 hdr->bhdr.dst_id = htonl(ch->rem_destid); 819 hdr->bhdr.src_mbox = cmbox; 820 hdr->bhdr.dst_mbox = cmbox; 821 hdr->bhdr.type = RIO_CM_CHAN; 822 hdr->ch_op = CM_DATA_MSG; 823 hdr->dst_ch = htons(ch->rem_channel); 824 hdr->src_ch = htons(ch->id); 825 hdr->msg_len = htons((u16)len); 826 827 /* ATTN: the function call below relies on the fact that underlying 828 * HW-specific add_outb_message() routine copies TX data into its own 829 * internal transfer buffer (true for all RIONET compatible mport 830 * drivers). Must be reviewed if mport driver uses the buffer directly. 
831 */ 832 833 ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len); 834 if (ret) 835 riocm_debug(TX, "ch %d send_err=%d", ch->id, ret); 836 err_out: 837 riocm_put_channel(ch); 838 return ret; 839 } 840 841 static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf) 842 { 843 int i, ret = -EINVAL; 844 845 spin_lock_bh(&ch->lock); 846 847 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { 848 if (ch->rx_ring.inuse[i] == buf) { 849 ch->rx_ring.inuse[i] = NULL; 850 ch->rx_ring.inuse_cnt--; 851 ret = 0; 852 break; 853 } 854 } 855 856 spin_unlock_bh(&ch->lock); 857 858 if (!ret) 859 kfree(buf); 860 861 return ret; 862 } 863 864 /* 865 * riocm_ch_receive - fetch a data packet received for the specified channel 866 * @ch: local channel ID 867 * @buf: pointer to a packet buffer 868 * @timeout: timeout to wait for incoming packet (in jiffies) 869 * 870 * Returns: 0 and valid buffer pointer if success, or NULL pointer and one of: 871 * -EAGAIN if a channel is not in CONNECTED state, 872 * -ENOMEM if in-use tracking queue is full, 873 * -ETIME if wait timeout expired, 874 * -EINTR if wait was interrupted. 875 */ 876 static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout) 877 { 878 void *rxmsg = NULL; 879 int i, ret = 0; 880 long wret; 881 882 if (!riocm_cmp(ch, RIO_CM_CONNECTED)) { 883 ret = -EAGAIN; 884 goto out; 885 } 886 887 if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) { 888 /* If we do not have entries to track buffers given to upper 889 * layer, reject request. 890 */ 891 ret = -ENOMEM; 892 goto out; 893 } 894 895 wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout); 896 897 riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret); 898 899 if (!wret) 900 ret = -ETIME; 901 else if (wret == -ERESTARTSYS) 902 ret = -EINTR; 903 else 904 ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -ECONNRESET; 905 906 if (ret) 907 goto out; 908 909 spin_lock_bh(&ch->lock); 910 911 rxmsg = ch->rx_ring.buf[ch->rx_ring.tail]; 912 ch->rx_ring.buf[ch->rx_ring.tail] = NULL; 913 ch->rx_ring.count--; 914 ch->rx_ring.tail++; 915 ch->rx_ring.tail %= RIOCM_RX_RING_SIZE; 916 ret = -ENOMEM; 917 918 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { 919 if (ch->rx_ring.inuse[i] == NULL) { 920 ch->rx_ring.inuse[i] = rxmsg; 921 ch->rx_ring.inuse_cnt++; 922 ret = 0; 923 break; 924 } 925 } 926 927 if (ret) { 928 /* We have no entry to store pending message: drop it */ 929 kfree(rxmsg); 930 rxmsg = NULL; 931 } 932 933 spin_unlock_bh(&ch->lock); 934 out: 935 *buf = rxmsg; 936 return ret; 937 } 938 939 /* 940 * riocm_ch_connect - sends a connect request to a remote device 941 * @loc_ch: local channel ID 942 * @cm: CM device to send connect request 943 * @peer: target RapidIO device 944 * @rem_ch: remote channel ID 945 * 946 * Returns: 0 if success, or 947 * -EINVAL if the channel is not in IDLE state, 948 * -EAGAIN if no connection request available immediately, 949 * -ETIME if ACK response timeout expired, 950 * -EINTR if wait for response was interrupted. 
951 */ 952 static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm, 953 struct cm_peer *peer, u16 rem_ch) 954 { 955 struct rio_channel *ch = NULL; 956 struct rio_ch_chan_hdr *hdr; 957 int ret; 958 long wret; 959 960 ch = riocm_get_channel(loc_ch); 961 if (!ch) 962 return -ENODEV; 963 964 if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) { 965 ret = -EINVAL; 966 goto conn_done; 967 } 968 969 ch->cmdev = cm; 970 ch->rdev = peer->rdev; 971 ch->context = NULL; 972 ch->loc_destid = cm->mport->host_deviceid; 973 ch->rem_channel = rem_ch; 974 975 /* 976 * Send connect request to the remote RapidIO device 977 */ 978 979 hdr = kzalloc(sizeof(*hdr), GFP_KERNEL); 980 if (hdr == NULL) { 981 ret = -ENOMEM; 982 goto conn_done; 983 } 984 985 hdr->bhdr.src_id = htonl(ch->loc_destid); 986 hdr->bhdr.dst_id = htonl(peer->rdev->destid); 987 hdr->bhdr.src_mbox = cmbox; 988 hdr->bhdr.dst_mbox = cmbox; 989 hdr->bhdr.type = RIO_CM_CHAN; 990 hdr->ch_op = CM_CONN_REQ; 991 hdr->dst_ch = htons(rem_ch); 992 hdr->src_ch = htons(loc_ch); 993 994 /* ATTN: the function call below relies on the fact that underlying 995 * HW-specific add_outb_message() routine copies TX data into its 996 * internal transfer buffer. Must be reviewed if mport driver uses 997 * this buffer directly. 998 */ 999 ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr)); 1000 1001 if (ret != -EBUSY) { 1002 kfree(hdr); 1003 } else { 1004 ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr)); 1005 if (ret) 1006 kfree(hdr); 1007 } 1008 1009 if (ret) { 1010 riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE); 1011 goto conn_done; 1012 } 1013 1014 /* Wait for connect response from the remote device */ 1015 wret = wait_for_completion_interruptible_timeout(&ch->comp, 1016 RIOCM_CONNECT_TO * HZ); 1017 riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret); 1018 1019 if (!wret) 1020 ret = -ETIME; 1021 else if (wret == -ERESTARTSYS) 1022 ret = -EINTR; 1023 else 1024 ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1; 1025 1026 conn_done: 1027 riocm_put_channel(ch); 1028 return ret; 1029 } 1030 1031 static int riocm_send_ack(struct rio_channel *ch) 1032 { 1033 struct rio_ch_chan_hdr *hdr; 1034 int ret; 1035 1036 hdr = kzalloc(sizeof(*hdr), GFP_KERNEL); 1037 if (hdr == NULL) 1038 return -ENOMEM; 1039 1040 hdr->bhdr.src_id = htonl(ch->loc_destid); 1041 hdr->bhdr.dst_id = htonl(ch->rem_destid); 1042 hdr->dst_ch = htons(ch->rem_channel); 1043 hdr->src_ch = htons(ch->id); 1044 hdr->bhdr.src_mbox = cmbox; 1045 hdr->bhdr.dst_mbox = cmbox; 1046 hdr->bhdr.type = RIO_CM_CHAN; 1047 hdr->ch_op = CM_CONN_ACK; 1048 1049 /* ATTN: the function call below relies on the fact that underlying 1050 * add_outb_message() routine copies TX data into its internal transfer 1051 * buffer. Review if switching to direct buffer version. 1052 */ 1053 ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr)); 1054 1055 if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, 1056 ch->rdev, hdr, sizeof(*hdr))) 1057 return 0; 1058 kfree(hdr); 1059 1060 if (ret) 1061 riocm_error("send ACK to ch_%d on %s failed (ret=%d)", 1062 ch->id, rio_name(ch->rdev), ret); 1063 return ret; 1064 } 1065 1066 /* 1067 * riocm_ch_accept - accept incoming connection request 1068 * @ch_id: channel ID 1069 * @new_ch_id: local mport device 1070 * @timeout: wait timeout (if 0 non-blocking call, do not wait if connection 1071 * request is not available). 
1072 * 1073 * Returns: pointer to new channel struct if success, or error-valued pointer: 1074 * -ENODEV - cannot find specified channel or mport, 1075 * -EINVAL - the channel is not in IDLE state, 1076 * -EAGAIN - no connection request available immediately (timeout=0), 1077 * -ENOMEM - unable to allocate new channel, 1078 * -ETIME - wait timeout expired, 1079 * -EINTR - wait was interrupted. 1080 */ 1081 static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, 1082 long timeout) 1083 { 1084 struct rio_channel *ch = NULL; 1085 struct rio_channel *new_ch = NULL; 1086 struct conn_req *req; 1087 struct cm_peer *peer; 1088 int found = 0; 1089 int err = 0; 1090 long wret; 1091 1092 ch = riocm_get_channel(ch_id); 1093 if (!ch) 1094 return ERR_PTR(-EINVAL); 1095 1096 if (!riocm_cmp(ch, RIO_CM_LISTEN)) { 1097 err = -EINVAL; 1098 goto err_put; 1099 } 1100 1101 /* Don't sleep if this is a non blocking call */ 1102 if (!timeout) { 1103 if (!try_wait_for_completion(&ch->comp)) { 1104 err = -EAGAIN; 1105 goto err_put; 1106 } 1107 } else { 1108 riocm_debug(WAIT, "on %d", ch->id); 1109 1110 wret = wait_for_completion_interruptible_timeout(&ch->comp, 1111 timeout); 1112 if (!wret) { 1113 err = -ETIME; 1114 goto err_put; 1115 } else if (wret == -ERESTARTSYS) { 1116 err = -EINTR; 1117 goto err_put; 1118 } 1119 } 1120 1121 spin_lock_bh(&ch->lock); 1122 1123 if (ch->state != RIO_CM_LISTEN) { 1124 err = -ECANCELED; 1125 } else if (list_empty(&ch->accept_queue)) { 1126 riocm_debug(WAIT, "on %d accept_queue is empty on completion", 1127 ch->id); 1128 err = -EIO; 1129 } 1130 1131 spin_unlock_bh(&ch->lock); 1132 1133 if (err) { 1134 riocm_debug(WAIT, "on %d returns %d", ch->id, err); 1135 goto err_put; 1136 } 1137 1138 /* Create new channel for this connection */ 1139 new_ch = riocm_ch_alloc(RIOCM_CHNUM_AUTO); 1140 1141 if (IS_ERR(new_ch)) { 1142 riocm_error("failed to get channel for new req (%ld)", 1143 PTR_ERR(new_ch)); 1144 err = -ENOMEM; 1145 goto err_put; 1146 } 1147 1148 spin_lock_bh(&ch->lock); 1149 1150 req = list_first_entry(&ch->accept_queue, struct conn_req, node); 1151 list_del(&req->node); 1152 new_ch->cmdev = ch->cmdev; 1153 new_ch->loc_destid = ch->loc_destid; 1154 new_ch->rem_destid = req->destid; 1155 new_ch->rem_channel = req->chan; 1156 1157 spin_unlock_bh(&ch->lock); 1158 riocm_put_channel(ch); 1159 kfree(req); 1160 1161 down_read(&rdev_sem); 1162 /* Find requester's device object */ 1163 list_for_each_entry(peer, &new_ch->cmdev->peers, node) { 1164 if (peer->rdev->destid == new_ch->rem_destid) { 1165 riocm_debug(RX_CMD, "found matching device(%s)", 1166 rio_name(peer->rdev)); 1167 found = 1; 1168 break; 1169 } 1170 } 1171 up_read(&rdev_sem); 1172 1173 if (!found) { 1174 /* If peer device object not found, simply ignore the request */ 1175 err = -ENODEV; 1176 goto err_nodev; 1177 } 1178 1179 new_ch->rdev = peer->rdev; 1180 new_ch->state = RIO_CM_CONNECTED; 1181 spin_lock_init(&new_ch->lock); 1182 1183 /* Acknowledge the connection request. 
*/ 1184 riocm_send_ack(new_ch); 1185 1186 *new_ch_id = new_ch->id; 1187 return new_ch; 1188 err_put: 1189 riocm_put_channel(ch); 1190 err_nodev: 1191 if (new_ch) { 1192 spin_lock_bh(&idr_lock); 1193 idr_remove(&ch_idr, new_ch->id); 1194 spin_unlock_bh(&idr_lock); 1195 riocm_put_channel(new_ch); 1196 } 1197 *new_ch_id = 0; 1198 return ERR_PTR(err); 1199 } 1200 1201 /* 1202 * riocm_ch_listen - puts a channel into LISTEN state 1203 * @ch_id: channel ID 1204 * 1205 * Returns: 0 if success, or 1206 * -EINVAL if the specified channel does not exists or 1207 * is not in CHAN_BOUND state. 1208 */ 1209 static int riocm_ch_listen(u16 ch_id) 1210 { 1211 struct rio_channel *ch = NULL; 1212 int ret = 0; 1213 1214 riocm_debug(CHOP, "(ch_%d)", ch_id); 1215 1216 ch = riocm_get_channel(ch_id); 1217 if (!ch || !riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN)) 1218 ret = -EINVAL; 1219 riocm_put_channel(ch); 1220 return ret; 1221 } 1222 1223 /* 1224 * riocm_ch_bind - associate a channel object and an mport device 1225 * @ch_id: channel ID 1226 * @mport_id: local mport device ID 1227 * @context: pointer to the additional caller's context 1228 * 1229 * Returns: 0 if success, or 1230 * -ENODEV if cannot find specified mport, 1231 * -EINVAL if the specified channel does not exist or 1232 * is not in IDLE state. 1233 */ 1234 static int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context) 1235 { 1236 struct rio_channel *ch = NULL; 1237 struct cm_dev *cm; 1238 int rc = -ENODEV; 1239 1240 riocm_debug(CHOP, "ch_%d to mport_%d", ch_id, mport_id); 1241 1242 /* Find matching cm_dev object */ 1243 down_read(&rdev_sem); 1244 list_for_each_entry(cm, &cm_dev_list, list) { 1245 if ((cm->mport->id == mport_id) && 1246 rio_mport_is_running(cm->mport)) { 1247 rc = 0; 1248 break; 1249 } 1250 } 1251 1252 if (rc) 1253 goto exit; 1254 1255 ch = riocm_get_channel(ch_id); 1256 if (!ch) { 1257 rc = -EINVAL; 1258 goto exit; 1259 } 1260 1261 spin_lock_bh(&ch->lock); 1262 if (ch->state != RIO_CM_IDLE) { 1263 spin_unlock_bh(&ch->lock); 1264 rc = -EINVAL; 1265 goto err_put; 1266 } 1267 1268 ch->cmdev = cm; 1269 ch->loc_destid = cm->mport->host_deviceid; 1270 ch->context = context; 1271 ch->state = RIO_CM_CHAN_BOUND; 1272 spin_unlock_bh(&ch->lock); 1273 err_put: 1274 riocm_put_channel(ch); 1275 exit: 1276 up_read(&rdev_sem); 1277 return rc; 1278 } 1279 1280 /* 1281 * riocm_ch_alloc - channel object allocation helper routine 1282 * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic) 1283 * 1284 * Return value: pointer to newly created channel object, 1285 * or error-valued pointer 1286 */ 1287 static struct rio_channel *riocm_ch_alloc(u16 ch_num) 1288 { 1289 int id; 1290 int start, end; 1291 struct rio_channel *ch; 1292 1293 ch = kzalloc(sizeof(*ch), GFP_KERNEL); 1294 if (!ch) 1295 return ERR_PTR(-ENOMEM); 1296 1297 if (ch_num) { 1298 /* If requested, try to obtain the specified channel ID */ 1299 start = ch_num; 1300 end = ch_num + 1; 1301 } else { 1302 /* Obtain channel ID from the dynamic allocation range */ 1303 start = chstart; 1304 end = RIOCM_MAX_CHNUM + 1; 1305 } 1306 1307 idr_preload(GFP_KERNEL); 1308 spin_lock_bh(&idr_lock); 1309 id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT); 1310 spin_unlock_bh(&idr_lock); 1311 idr_preload_end(); 1312 1313 if (id < 0) { 1314 kfree(ch); 1315 return ERR_PTR(id == -ENOSPC ? 
-EBUSY : id); 1316 } 1317 1318 ch->id = (u16)id; 1319 ch->state = RIO_CM_IDLE; 1320 spin_lock_init(&ch->lock); 1321 INIT_LIST_HEAD(&ch->accept_queue); 1322 INIT_LIST_HEAD(&ch->ch_node); 1323 init_completion(&ch->comp); 1324 init_completion(&ch->comp_close); 1325 kref_init(&ch->ref); 1326 ch->rx_ring.head = 0; 1327 ch->rx_ring.tail = 0; 1328 ch->rx_ring.count = 0; 1329 ch->rx_ring.inuse_cnt = 0; 1330 1331 return ch; 1332 } 1333 1334 /* 1335 * riocm_ch_create - creates a new channel object and allocates ID for it 1336 * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic) 1337 * 1338 * Allocates and initializes a new channel object. If the parameter ch_num > 0 1339 * and is within the valid range, riocm_ch_create tries to allocate the 1340 * specified ID for the new channel. If ch_num = 0, channel ID will be assigned 1341 * automatically from the range (chstart ... RIOCM_MAX_CHNUM). 1342 * Module parameter 'chstart' defines start of an ID range available for dynamic 1343 * allocation. Range below 'chstart' is reserved for pre-defined ID numbers. 1344 * Available channel numbers are limited by 16-bit size of channel numbers used 1345 * in the packet header. 1346 * 1347 * Return value: PTR to rio_channel structure if successful (with channel number 1348 * updated via pointer) or error-valued pointer if error. 1349 */ 1350 static struct rio_channel *riocm_ch_create(u16 *ch_num) 1351 { 1352 struct rio_channel *ch = NULL; 1353 1354 ch = riocm_ch_alloc(*ch_num); 1355 1356 if (IS_ERR(ch)) 1357 riocm_debug(CHOP, "Failed to allocate channel %d (err=%ld)", 1358 *ch_num, PTR_ERR(ch)); 1359 else 1360 *ch_num = ch->id; 1361 1362 return ch; 1363 } 1364 1365 /* 1366 * riocm_ch_free - channel object release routine 1367 * @ref: pointer to a channel's kref structure 1368 */ 1369 static void riocm_ch_free(struct kref *ref) 1370 { 1371 struct rio_channel *ch = container_of(ref, struct rio_channel, ref); 1372 int i; 1373 1374 riocm_debug(CHOP, "(ch_%d)", ch->id); 1375 1376 if (ch->rx_ring.inuse_cnt) { 1377 for (i = 0; 1378 i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) { 1379 if (ch->rx_ring.inuse[i] != NULL) { 1380 kfree(ch->rx_ring.inuse[i]); 1381 ch->rx_ring.inuse_cnt--; 1382 } 1383 } 1384 } 1385 1386 if (ch->rx_ring.count) 1387 for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) { 1388 if (ch->rx_ring.buf[i] != NULL) { 1389 kfree(ch->rx_ring.buf[i]); 1390 ch->rx_ring.count--; 1391 } 1392 } 1393 1394 complete(&ch->comp_close); 1395 } 1396 1397 static int riocm_send_close(struct rio_channel *ch) 1398 { 1399 struct rio_ch_chan_hdr *hdr; 1400 int ret; 1401 1402 /* 1403 * Send CH_CLOSE notification to the remote RapidIO device 1404 */ 1405 1406 hdr = kzalloc(sizeof(*hdr), GFP_KERNEL); 1407 if (hdr == NULL) 1408 return -ENOMEM; 1409 1410 hdr->bhdr.src_id = htonl(ch->loc_destid); 1411 hdr->bhdr.dst_id = htonl(ch->rem_destid); 1412 hdr->bhdr.src_mbox = cmbox; 1413 hdr->bhdr.dst_mbox = cmbox; 1414 hdr->bhdr.type = RIO_CM_CHAN; 1415 hdr->ch_op = CM_CONN_CLOSE; 1416 hdr->dst_ch = htons(ch->rem_channel); 1417 hdr->src_ch = htons(ch->id); 1418 1419 /* ATTN: the function call below relies on the fact that underlying 1420 * add_outb_message() routine copies TX data into its internal transfer 1421 * buffer. Needs to be reviewed if switched to direct buffer mode. 
1422 */ 1423 ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr)); 1424 1425 if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev, 1426 hdr, sizeof(*hdr))) 1427 return 0; 1428 kfree(hdr); 1429 1430 if (ret) 1431 riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret); 1432 1433 return ret; 1434 } 1435 1436 /* 1437 * riocm_ch_close - closes a channel object with specified ID (by local request) 1438 * @ch: channel to be closed 1439 */ 1440 static int riocm_ch_close(struct rio_channel *ch) 1441 { 1442 unsigned long tmo = msecs_to_jiffies(3000); 1443 enum rio_cm_state state; 1444 long wret; 1445 int ret = 0; 1446 1447 riocm_debug(CHOP, "ch_%d by %s(%d)", 1448 ch->id, current->comm, task_pid_nr(current)); 1449 1450 state = riocm_exch(ch, RIO_CM_DESTROYING); 1451 if (state == RIO_CM_CONNECTED) 1452 riocm_send_close(ch); 1453 1454 complete_all(&ch->comp); 1455 1456 riocm_put_channel(ch); 1457 wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo); 1458 1459 riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret); 1460 1461 if (wret == 0) { 1462 /* Timeout on wait occurred */ 1463 riocm_debug(CHOP, "%s(%d) timed out waiting for ch %d", 1464 current->comm, task_pid_nr(current), ch->id); 1465 ret = -ETIMEDOUT; 1466 } else if (wret == -ERESTARTSYS) { 1467 /* Wait_for_completion was interrupted by a signal */ 1468 riocm_debug(CHOP, "%s(%d) wait for ch %d was interrupted", 1469 current->comm, task_pid_nr(current), ch->id); 1470 ret = -EINTR; 1471 } 1472 1473 if (!ret) { 1474 riocm_debug(CHOP, "ch_%d resources released", ch->id); 1475 kfree(ch); 1476 } else { 1477 riocm_debug(CHOP, "failed to release ch_%d resources", ch->id); 1478 } 1479 1480 return ret; 1481 } 1482 1483 /* 1484 * riocm_cdev_open() - Open character device 1485 */ 1486 static int riocm_cdev_open(struct inode *inode, struct file *filp) 1487 { 1488 riocm_debug(INIT, "by %s(%d) filp=%p ", 1489 current->comm, task_pid_nr(current), filp); 1490 1491 if (list_empty(&cm_dev_list)) 1492 return -ENODEV; 1493 1494 return 0; 1495 } 1496 1497 /* 1498 * riocm_cdev_release() - Release character device 1499 */ 1500 static int riocm_cdev_release(struct inode *inode, struct file *filp) 1501 { 1502 struct rio_channel *ch, *_c; 1503 unsigned int i; 1504 LIST_HEAD(list); 1505 1506 riocm_debug(EXIT, "by %s(%d) filp=%p", 1507 current->comm, task_pid_nr(current), filp); 1508 1509 /* Check if there are channels associated with this file descriptor */ 1510 spin_lock_bh(&idr_lock); 1511 idr_for_each_entry(&ch_idr, ch, i) { 1512 if (ch && ch->filp == filp) { 1513 riocm_debug(EXIT, "ch_%d not released by %s(%d)", 1514 ch->id, current->comm, 1515 task_pid_nr(current)); 1516 idr_remove(&ch_idr, ch->id); 1517 list_add(&ch->ch_node, &list); 1518 } 1519 } 1520 spin_unlock_bh(&idr_lock); 1521 1522 if (!list_empty(&list)) { 1523 list_for_each_entry_safe(ch, _c, &list, ch_node) { 1524 list_del(&ch->ch_node); 1525 riocm_ch_close(ch); 1526 } 1527 } 1528 1529 return 0; 1530 } 1531 1532 /* 1533 * cm_ep_get_list_size() - Reports number of endpoints in the network 1534 */ 1535 static int cm_ep_get_list_size(void __user *arg) 1536 { 1537 u32 __user *p = arg; 1538 u32 mport_id; 1539 u32 count = 0; 1540 struct cm_dev *cm; 1541 1542 if (get_user(mport_id, p)) 1543 return -EFAULT; 1544 if (mport_id >= RIO_MAX_MPORTS) 1545 return -EINVAL; 1546 1547 /* Find a matching cm_dev object */ 1548 down_read(&rdev_sem); 1549 list_for_each_entry(cm, &cm_dev_list, list) { 1550 if (cm->mport->id == mport_id) { 1551 count = cm->npeers; 1552 up_read(&rdev_sem); 
1553 if (copy_to_user(arg, &count, sizeof(u32))) 1554 return -EFAULT; 1555 return 0; 1556 } 1557 } 1558 up_read(&rdev_sem); 1559 1560 return -ENODEV; 1561 } 1562 1563 /* 1564 * cm_ep_get_list() - Returns list of attached endpoints 1565 */ 1566 static int cm_ep_get_list(void __user *arg) 1567 { 1568 struct cm_dev *cm; 1569 struct cm_peer *peer; 1570 u32 info[2]; 1571 void *buf; 1572 u32 nent; 1573 u32 *entry_ptr; 1574 u32 i = 0; 1575 int ret = 0; 1576 1577 if (copy_from_user(&info, arg, sizeof(info))) 1578 return -EFAULT; 1579 1580 if (info[1] >= RIO_MAX_MPORTS || info[0] > RIOCM_MAX_EP_COUNT) 1581 return -EINVAL; 1582 1583 /* Find a matching cm_dev object */ 1584 down_read(&rdev_sem); 1585 list_for_each_entry(cm, &cm_dev_list, list) 1586 if (cm->mport->id == (u8)info[1]) 1587 goto found; 1588 1589 up_read(&rdev_sem); 1590 return -ENODEV; 1591 1592 found: 1593 nent = min(info[0], cm->npeers); 1594 buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL); 1595 if (!buf) { 1596 up_read(&rdev_sem); 1597 return -ENOMEM; 1598 } 1599 1600 entry_ptr = (u32 *)((uintptr_t)buf + 2*sizeof(u32)); 1601 1602 list_for_each_entry(peer, &cm->peers, node) { 1603 *entry_ptr = (u32)peer->rdev->destid; 1604 entry_ptr++; 1605 if (++i == nent) 1606 break; 1607 } 1608 up_read(&rdev_sem); 1609 1610 ((u32 *)buf)[0] = i; /* report an updated number of entries */ 1611 ((u32 *)buf)[1] = info[1]; /* put back an mport ID */ 1612 if (copy_to_user(arg, buf, sizeof(u32) * (info[0] + 2))) 1613 ret = -EFAULT; 1614 1615 kfree(buf); 1616 return ret; 1617 } 1618 1619 /* 1620 * cm_mport_get_list() - Returns list of available local mport devices 1621 */ 1622 static int cm_mport_get_list(void __user *arg) 1623 { 1624 int ret = 0; 1625 u32 entries; 1626 void *buf; 1627 struct cm_dev *cm; 1628 u32 *entry_ptr; 1629 int count = 0; 1630 1631 if (copy_from_user(&entries, arg, sizeof(entries))) 1632 return -EFAULT; 1633 if (entries == 0 || entries > RIO_MAX_MPORTS) 1634 return -EINVAL; 1635 buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL); 1636 if (!buf) 1637 return -ENOMEM; 1638 1639 /* Scan all registered cm_dev objects */ 1640 entry_ptr = (u32 *)((uintptr_t)buf + sizeof(u32)); 1641 down_read(&rdev_sem); 1642 list_for_each_entry(cm, &cm_dev_list, list) { 1643 if (count++ < entries) { 1644 *entry_ptr = (cm->mport->id << 16) | 1645 cm->mport->host_deviceid; 1646 entry_ptr++; 1647 } 1648 } 1649 up_read(&rdev_sem); 1650 1651 *((u32 *)buf) = count; /* report a real number of entries */ 1652 if (copy_to_user(arg, buf, sizeof(u32) * (count + 1))) 1653 ret = -EFAULT; 1654 1655 kfree(buf); 1656 return ret; 1657 } 1658 1659 /* 1660 * cm_chan_create() - Create a message exchange channel 1661 */ 1662 static int cm_chan_create(struct file *filp, void __user *arg) 1663 { 1664 u16 __user *p = arg; 1665 u16 ch_num; 1666 struct rio_channel *ch; 1667 1668 if (get_user(ch_num, p)) 1669 return -EFAULT; 1670 1671 riocm_debug(CHOP, "ch_%d requested by %s(%d)", 1672 ch_num, current->comm, task_pid_nr(current)); 1673 ch = riocm_ch_create(&ch_num); 1674 if (IS_ERR(ch)) 1675 return PTR_ERR(ch); 1676 1677 ch->filp = filp; 1678 riocm_debug(CHOP, "ch_%d created by %s(%d)", 1679 ch_num, current->comm, task_pid_nr(current)); 1680 return put_user(ch_num, p); 1681 } 1682 1683 /* 1684 * cm_chan_close() - Close channel 1685 * @filp: Pointer to file object 1686 * @arg: Channel to close 1687 */ 1688 static int cm_chan_close(struct file *filp, void __user *arg) 1689 { 1690 u16 __user *p = arg; 1691 u16 ch_num; 1692 struct rio_channel *ch; 1693 1694 if (get_user(ch_num, p)) 1695 
return -EFAULT; 1696 1697 riocm_debug(CHOP, "ch_%d by %s(%d)", 1698 ch_num, current->comm, task_pid_nr(current)); 1699 1700 spin_lock_bh(&idr_lock); 1701 ch = idr_find(&ch_idr, ch_num); 1702 if (!ch) { 1703 spin_unlock_bh(&idr_lock); 1704 return 0; 1705 } 1706 if (ch->filp != filp) { 1707 spin_unlock_bh(&idr_lock); 1708 return -EINVAL; 1709 } 1710 idr_remove(&ch_idr, ch->id); 1711 spin_unlock_bh(&idr_lock); 1712 1713 return riocm_ch_close(ch); 1714 } 1715 1716 /* 1717 * cm_chan_bind() - Bind channel 1718 * @arg: Channel number 1719 */ 1720 static int cm_chan_bind(void __user *arg) 1721 { 1722 struct rio_cm_channel chan; 1723 1724 if (copy_from_user(&chan, arg, sizeof(chan))) 1725 return -EFAULT; 1726 if (chan.mport_id >= RIO_MAX_MPORTS) 1727 return -EINVAL; 1728 1729 return riocm_ch_bind(chan.id, chan.mport_id, NULL); 1730 } 1731 1732 /* 1733 * cm_chan_listen() - Listen on channel 1734 * @arg: Channel number 1735 */ 1736 static int cm_chan_listen(void __user *arg) 1737 { 1738 u16 __user *p = arg; 1739 u16 ch_num; 1740 1741 if (get_user(ch_num, p)) 1742 return -EFAULT; 1743 1744 return riocm_ch_listen(ch_num); 1745 } 1746 1747 /* 1748 * cm_chan_accept() - Accept incoming connection 1749 * @filp: Pointer to file object 1750 * @arg: Channel number 1751 */ 1752 static int cm_chan_accept(struct file *filp, void __user *arg) 1753 { 1754 struct rio_cm_accept param; 1755 long accept_to; 1756 struct rio_channel *ch; 1757 1758 if (copy_from_user(&param, arg, sizeof(param))) 1759 return -EFAULT; 1760 1761 riocm_debug(CHOP, "on ch_%d by %s(%d)", 1762 param.ch_num, current->comm, task_pid_nr(current)); 1763 1764 accept_to = param.wait_to ? 1765 msecs_to_jiffies(param.wait_to) : 0; 1766 1767 ch = riocm_ch_accept(param.ch_num, &param.ch_num, accept_to); 1768 if (IS_ERR(ch)) 1769 return PTR_ERR(ch); 1770 ch->filp = filp; 1771 1772 riocm_debug(CHOP, "new ch_%d for %s(%d)", 1773 ch->id, current->comm, task_pid_nr(current)); 1774 1775 if (copy_to_user(arg, &param, sizeof(param))) 1776 return -EFAULT; 1777 return 0; 1778 } 1779 1780 /* 1781 * cm_chan_connect() - Connect on channel 1782 * @arg: Channel information 1783 */ 1784 static int cm_chan_connect(void __user *arg) 1785 { 1786 struct rio_cm_channel chan; 1787 struct cm_dev *cm; 1788 struct cm_peer *peer; 1789 int ret = -ENODEV; 1790 1791 if (copy_from_user(&chan, arg, sizeof(chan))) 1792 return -EFAULT; 1793 if (chan.mport_id >= RIO_MAX_MPORTS) 1794 return -EINVAL; 1795 1796 down_read(&rdev_sem); 1797 1798 /* Find matching cm_dev object */ 1799 list_for_each_entry(cm, &cm_dev_list, list) { 1800 if (cm->mport->id == chan.mport_id) { 1801 ret = 0; 1802 break; 1803 } 1804 } 1805 1806 if (ret) 1807 goto err_out; 1808 1809 if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) { 1810 ret = -EINVAL; 1811 goto err_out; 1812 } 1813 1814 /* Find corresponding RapidIO endpoint device object */ 1815 ret = -ENODEV; 1816 1817 list_for_each_entry(peer, &cm->peers, node) { 1818 if (peer->rdev->destid == chan.remote_destid) { 1819 ret = 0; 1820 break; 1821 } 1822 } 1823 1824 if (ret) 1825 goto err_out; 1826 1827 up_read(&rdev_sem); 1828 1829 return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel); 1830 err_out: 1831 up_read(&rdev_sem); 1832 return ret; 1833 } 1834 1835 /* 1836 * cm_chan_msg_send() - Send a message through channel 1837 * @arg: Outbound message information 1838 */ 1839 static int cm_chan_msg_send(void __user *arg) 1840 { 1841 struct rio_cm_msg msg; 1842 void *buf; 1843 int ret = 0; 1844 1845 if (copy_from_user(&msg, arg, sizeof(msg))) 1846 
return -EFAULT; 1847 if (msg.size > RIO_MAX_MSG_SIZE) 1848 return -EINVAL; 1849 1850 buf = kmalloc(msg.size, GFP_KERNEL); 1851 if (!buf) 1852 return -ENOMEM; 1853 1854 if (copy_from_user(buf, (void __user *)(uintptr_t)msg.msg, msg.size)) { 1855 ret = -EFAULT; 1856 goto out; 1857 } 1858 1859 ret = riocm_ch_send(msg.ch_num, buf, msg.size); 1860 out: 1861 kfree(buf); 1862 return ret; 1863 } 1864 1865 /* 1866 * cm_chan_msg_rcv() - Receive a message through channel 1867 * @arg: Inbound message information 1868 */ 1869 static int cm_chan_msg_rcv(void __user *arg) 1870 { 1871 struct rio_cm_msg msg; 1872 struct rio_channel *ch; 1873 void *buf; 1874 long rxto; 1875 int ret = 0, msg_size; 1876 1877 if (copy_from_user(&msg, arg, sizeof(msg))) 1878 return -EFAULT; 1879 1880 if (msg.ch_num == 0 || msg.size == 0) 1881 return -EINVAL; 1882 1883 ch = riocm_get_channel(msg.ch_num); 1884 if (!ch) 1885 return -ENODEV; 1886 1887 rxto = msg.rxto ? msecs_to_jiffies(msg.rxto) : MAX_SCHEDULE_TIMEOUT; 1888 1889 ret = riocm_ch_receive(ch, &buf, rxto); 1890 if (ret) 1891 goto out; 1892 1893 msg_size = min(msg.size, (u16)(RIO_MAX_MSG_SIZE)); 1894 1895 if (copy_to_user((void __user *)(uintptr_t)msg.msg, buf, msg_size)) 1896 ret = -EFAULT; 1897 1898 riocm_ch_free_rxbuf(ch, buf); 1899 out: 1900 riocm_put_channel(ch); 1901 return ret; 1902 } 1903 1904 /* 1905 * riocm_cdev_ioctl() - IOCTL requests handler 1906 */ 1907 static long 1908 riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 1909 { 1910 switch (cmd) { 1911 case RIO_CM_EP_GET_LIST_SIZE: 1912 return cm_ep_get_list_size((void __user *)arg); 1913 case RIO_CM_EP_GET_LIST: 1914 return cm_ep_get_list((void __user *)arg); 1915 case RIO_CM_CHAN_CREATE: 1916 return cm_chan_create(filp, (void __user *)arg); 1917 case RIO_CM_CHAN_CLOSE: 1918 return cm_chan_close(filp, (void __user *)arg); 1919 case RIO_CM_CHAN_BIND: 1920 return cm_chan_bind((void __user *)arg); 1921 case RIO_CM_CHAN_LISTEN: 1922 return cm_chan_listen((void __user *)arg); 1923 case RIO_CM_CHAN_ACCEPT: 1924 return cm_chan_accept(filp, (void __user *)arg); 1925 case RIO_CM_CHAN_CONNECT: 1926 return cm_chan_connect((void __user *)arg); 1927 case RIO_CM_CHAN_SEND: 1928 return cm_chan_msg_send((void __user *)arg); 1929 case RIO_CM_CHAN_RECEIVE: 1930 return cm_chan_msg_rcv((void __user *)arg); 1931 case RIO_CM_MPORT_GET_LIST: 1932 return cm_mport_get_list((void __user *)arg); 1933 default: 1934 break; 1935 } 1936 1937 return -EINVAL; 1938 } 1939 1940 static const struct file_operations riocm_cdev_fops = { 1941 .owner = THIS_MODULE, 1942 .open = riocm_cdev_open, 1943 .release = riocm_cdev_release, 1944 .unlocked_ioctl = riocm_cdev_ioctl, 1945 }; 1946 1947 /* 1948 * riocm_add_dev - add new remote RapidIO device into channel management core 1949 * @dev: device object associated with RapidIO device 1950 * @sif: subsystem interface 1951 * 1952 * Adds the specified RapidIO device (if applicable) into peers list of 1953 * the corresponding channel management device (cm_dev). 
1954 */ 1955 static int riocm_add_dev(struct device *dev, struct subsys_interface *sif) 1956 { 1957 struct cm_peer *peer; 1958 struct rio_dev *rdev = to_rio_dev(dev); 1959 struct cm_dev *cm; 1960 1961 /* Check if the remote device has capabilities required to support CM */ 1962 if (!dev_cm_capable(rdev)) 1963 return 0; 1964 1965 riocm_debug(RDEV, "(%s)", rio_name(rdev)); 1966 1967 peer = kmalloc(sizeof(*peer), GFP_KERNEL); 1968 if (!peer) 1969 return -ENOMEM; 1970 1971 /* Find a corresponding cm_dev object */ 1972 down_write(&rdev_sem); 1973 list_for_each_entry(cm, &cm_dev_list, list) { 1974 if (cm->mport == rdev->net->hport) 1975 goto found; 1976 } 1977 1978 up_write(&rdev_sem); 1979 kfree(peer); 1980 return -ENODEV; 1981 1982 found: 1983 peer->rdev = rdev; 1984 list_add_tail(&peer->node, &cm->peers); 1985 cm->npeers++; 1986 1987 up_write(&rdev_sem); 1988 return 0; 1989 } 1990 1991 /* 1992 * riocm_remove_dev - remove remote RapidIO device from channel management core 1993 * @dev: device object associated with RapidIO device 1994 * @sif: subsystem interface 1995 * 1996 * Removes the specified RapidIO device (if applicable) from peers list of 1997 * the corresponding channel management device (cm_dev). 1998 */ 1999 static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif) 2000 { 2001 struct rio_dev *rdev = to_rio_dev(dev); 2002 struct cm_dev *cm; 2003 struct cm_peer *peer; 2004 struct rio_channel *ch, *_c; 2005 unsigned int i; 2006 bool found = false; 2007 LIST_HEAD(list); 2008 2009 /* Check if the remote device has capabilities required to support CM */ 2010 if (!dev_cm_capable(rdev)) 2011 return; 2012 2013 riocm_debug(RDEV, "(%s)", rio_name(rdev)); 2014 2015 /* Find matching cm_dev object */ 2016 down_write(&rdev_sem); 2017 list_for_each_entry(cm, &cm_dev_list, list) { 2018 if (cm->mport == rdev->net->hport) { 2019 found = true; 2020 break; 2021 } 2022 } 2023 2024 if (!found) { 2025 up_write(&rdev_sem); 2026 return; 2027 } 2028 2029 /* Remove remote device from the list of peers */ 2030 found = false; 2031 list_for_each_entry(peer, &cm->peers, node) { 2032 if (peer->rdev == rdev) { 2033 riocm_debug(RDEV, "removing peer %s", rio_name(rdev)); 2034 found = true; 2035 list_del(&peer->node); 2036 cm->npeers--; 2037 kfree(peer); 2038 break; 2039 } 2040 } 2041 2042 up_write(&rdev_sem); 2043 2044 if (!found) 2045 return; 2046 2047 /* 2048 * Release channels associated with this peer 2049 */ 2050 2051 spin_lock_bh(&idr_lock); 2052 idr_for_each_entry(&ch_idr, ch, i) { 2053 if (ch && ch->rdev == rdev) { 2054 if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN) 2055 riocm_exch(ch, RIO_CM_DISCONNECT); 2056 idr_remove(&ch_idr, ch->id); 2057 list_add(&ch->ch_node, &list); 2058 } 2059 } 2060 spin_unlock_bh(&idr_lock); 2061 2062 if (!list_empty(&list)) { 2063 list_for_each_entry_safe(ch, _c, &list, ch_node) { 2064 list_del(&ch->ch_node); 2065 riocm_ch_close(ch); 2066 } 2067 } 2068 } 2069 2070 /* 2071 * riocm_cdev_add() - Create rio_cm char device 2072 * @devno: device number assigned to device (MAJ + MIN) 2073 */ 2074 static int riocm_cdev_add(dev_t devno) 2075 { 2076 int ret; 2077 2078 cdev_init(&riocm_cdev.cdev, &riocm_cdev_fops); 2079 riocm_cdev.cdev.owner = THIS_MODULE; 2080 ret = cdev_add(&riocm_cdev.cdev, devno, 1); 2081 if (ret < 0) { 2082 riocm_error("Cannot register a device with error %d", ret); 2083 return ret; 2084 } 2085 2086 riocm_cdev.dev = device_create(dev_class, NULL, devno, NULL, DEV_NAME); 2087 if (IS_ERR(riocm_cdev.dev)) { 2088 cdev_del(&riocm_cdev.cdev); 2089 
return PTR_ERR(riocm_cdev.dev); 2090 } 2091 2092 riocm_debug(MPORT, "Added %s cdev(%d:%d)", 2093 DEV_NAME, MAJOR(devno), MINOR(devno)); 2094 2095 return 0; 2096 } 2097 2098 /* 2099 * riocm_add_mport - add new local mport device into channel management core 2100 * @dev: device object associated with mport 2101 * @class_intf: class interface 2102 * 2103 * When a new mport device is added, CM immediately reserves inbound and 2104 * outbound RapidIO mailboxes that will be used. 2105 */ 2106 static int riocm_add_mport(struct device *dev, 2107 struct class_interface *class_intf) 2108 { 2109 int rc; 2110 int i; 2111 struct cm_dev *cm; 2112 struct rio_mport *mport = to_rio_mport(dev); 2113 2114 riocm_debug(MPORT, "add mport %s", mport->name); 2115 2116 cm = kzalloc(sizeof(*cm), GFP_KERNEL); 2117 if (!cm) 2118 return -ENOMEM; 2119 2120 cm->mport = mport; 2121 2122 rc = rio_request_outb_mbox(mport, cm, cmbox, 2123 RIOCM_TX_RING_SIZE, riocm_outb_msg_event); 2124 if (rc) { 2125 riocm_error("failed to allocate OBMBOX_%d on %s", 2126 cmbox, mport->name); 2127 kfree(cm); 2128 return -ENODEV; 2129 } 2130 2131 rc = rio_request_inb_mbox(mport, cm, cmbox, 2132 RIOCM_RX_RING_SIZE, riocm_inb_msg_event); 2133 if (rc) { 2134 riocm_error("failed to allocate IBMBOX_%d on %s", 2135 cmbox, mport->name); 2136 rio_release_outb_mbox(mport, cmbox); 2137 kfree(cm); 2138 return -ENODEV; 2139 } 2140 2141 /* 2142 * Allocate and register inbound messaging buffers to be ready 2143 * to receive channel and system management requests 2144 */ 2145 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) 2146 cm->rx_buf[i] = NULL; 2147 2148 cm->rx_slots = RIOCM_RX_RING_SIZE; 2149 mutex_init(&cm->rx_lock); 2150 riocm_rx_fill(cm, RIOCM_RX_RING_SIZE); 2151 cm->rx_wq = create_workqueue(DRV_NAME "/rxq"); 2152 INIT_WORK(&cm->rx_work, rio_ibmsg_handler); 2153 2154 cm->tx_slot = 0; 2155 cm->tx_cnt = 0; 2156 cm->tx_ack_slot = 0; 2157 spin_lock_init(&cm->tx_lock); 2158 2159 INIT_LIST_HEAD(&cm->peers); 2160 cm->npeers = 0; 2161 INIT_LIST_HEAD(&cm->tx_reqs); 2162 2163 down_write(&rdev_sem); 2164 list_add_tail(&cm->list, &cm_dev_list); 2165 up_write(&rdev_sem); 2166 2167 return 0; 2168 } 2169 2170 /* 2171 * riocm_remove_mport - remove local mport device from channel management core 2172 * @dev: device object associated with mport 2173 * @class_intf: class interface 2174 * 2175 * Removes a local mport device from the list of registered devices that provide 2176 * channel management services. Does nothing if the specified mport is not 2177 * registered with the CM core. 
2178 */ 2179 static void riocm_remove_mport(struct device *dev, 2180 struct class_interface *class_intf) 2181 { 2182 struct rio_mport *mport = to_rio_mport(dev); 2183 struct cm_dev *cm; 2184 struct cm_peer *peer, *temp; 2185 struct rio_channel *ch, *_c; 2186 unsigned int i; 2187 bool found = false; 2188 LIST_HEAD(list); 2189 2190 riocm_debug(MPORT, "%s", mport->name); 2191 2192 /* Find a matching cm_dev object */ 2193 down_write(&rdev_sem); 2194 list_for_each_entry(cm, &cm_dev_list, list) { 2195 if (cm->mport == mport) { 2196 list_del(&cm->list); 2197 found = true; 2198 break; 2199 } 2200 } 2201 up_write(&rdev_sem); 2202 if (!found) 2203 return; 2204 2205 flush_workqueue(cm->rx_wq); 2206 destroy_workqueue(cm->rx_wq); 2207 2208 /* Release channels bound to this mport */ 2209 spin_lock_bh(&idr_lock); 2210 idr_for_each_entry(&ch_idr, ch, i) { 2211 if (ch->cmdev == cm) { 2212 riocm_debug(RDEV, "%s drop ch_%d", 2213 mport->name, ch->id); 2214 idr_remove(&ch_idr, ch->id); 2215 list_add(&ch->ch_node, &list); 2216 } 2217 } 2218 spin_unlock_bh(&idr_lock); 2219 2220 if (!list_empty(&list)) { 2221 list_for_each_entry_safe(ch, _c, &list, ch_node) { 2222 list_del(&ch->ch_node); 2223 riocm_ch_close(ch); 2224 } 2225 } 2226 2227 rio_release_inb_mbox(mport, cmbox); 2228 rio_release_outb_mbox(mport, cmbox); 2229 2230 /* Remove and free peer entries */ 2231 if (!list_empty(&cm->peers)) 2232 riocm_debug(RDEV, "ATTN: peer list not empty"); 2233 list_for_each_entry_safe(peer, temp, &cm->peers, node) { 2234 riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev)); 2235 list_del(&peer->node); 2236 kfree(peer); 2237 } 2238 2239 riocm_rx_free(cm); 2240 kfree(cm); 2241 riocm_debug(MPORT, "%s done", mport->name); 2242 } 2243 2244 static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code, 2245 void *unused) 2246 { 2247 struct rio_channel *ch; 2248 unsigned int i; 2249 2250 riocm_debug(EXIT, "."); 2251 2252 spin_lock_bh(&idr_lock); 2253 idr_for_each_entry(&ch_idr, ch, i) { 2254 riocm_debug(EXIT, "close ch %d", ch->id); 2255 if (ch->state == RIO_CM_CONNECTED) 2256 riocm_send_close(ch); 2257 } 2258 spin_unlock_bh(&idr_lock); 2259 2260 return NOTIFY_DONE; 2261 } 2262 2263 /* 2264 * riocm_interface handles addition/removal of remote RapidIO devices 2265 */ 2266 static struct subsys_interface riocm_interface = { 2267 .name = "rio_cm", 2268 .subsys = &rio_bus_type, 2269 .add_dev = riocm_add_dev, 2270 .remove_dev = riocm_remove_dev, 2271 }; 2272 2273 /* 2274 * rio_mport_interface handles addition/removal local mport devices 2275 */ 2276 static struct class_interface rio_mport_interface __refdata = { 2277 .class = &rio_mport_class, 2278 .add_dev = riocm_add_mport, 2279 .remove_dev = riocm_remove_mport, 2280 }; 2281 2282 static struct notifier_block rio_cm_notifier = { 2283 .notifier_call = rio_cm_shutdown, 2284 }; 2285 2286 static int __init riocm_init(void) 2287 { 2288 int ret; 2289 2290 /* Create device class needed by udev */ 2291 dev_class = class_create(THIS_MODULE, DRV_NAME); 2292 if (IS_ERR(dev_class)) { 2293 riocm_error("Cannot create " DRV_NAME " class"); 2294 return PTR_ERR(dev_class); 2295 } 2296 2297 ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME); 2298 if (ret) { 2299 class_destroy(dev_class); 2300 return ret; 2301 } 2302 2303 dev_major = MAJOR(dev_number); 2304 dev_minor_base = MINOR(dev_number); 2305 riocm_debug(INIT, "Registered class with %d major", dev_major); 2306 2307 /* 2308 * Register as rapidio_port class interface to get notifications about 2309 * mport additions and removals. 
2310 */ 2311 ret = class_interface_register(&rio_mport_interface); 2312 if (ret) { 2313 riocm_error("class_interface_register error: %d", ret); 2314 goto err_reg; 2315 } 2316 2317 /* 2318 * Register as RapidIO bus interface to get notifications about 2319 * addition/removal of remote RapidIO devices. 2320 */ 2321 ret = subsys_interface_register(&riocm_interface); 2322 if (ret) { 2323 riocm_error("subsys_interface_register error: %d", ret); 2324 goto err_cl; 2325 } 2326 2327 ret = register_reboot_notifier(&rio_cm_notifier); 2328 if (ret) { 2329 riocm_error("failed to register reboot notifier (err=%d)", ret); 2330 goto err_sif; 2331 } 2332 2333 ret = riocm_cdev_add(dev_number); 2334 if (ret) { 2335 unregister_reboot_notifier(&rio_cm_notifier); 2336 ret = -ENODEV; 2337 goto err_sif; 2338 } 2339 2340 return 0; 2341 err_sif: 2342 subsys_interface_unregister(&riocm_interface); 2343 err_cl: 2344 class_interface_unregister(&rio_mport_interface); 2345 err_reg: 2346 unregister_chrdev_region(dev_number, 1); 2347 class_destroy(dev_class); 2348 return ret; 2349 } 2350 2351 static void __exit riocm_exit(void) 2352 { 2353 riocm_debug(EXIT, "enter"); 2354 unregister_reboot_notifier(&rio_cm_notifier); 2355 subsys_interface_unregister(&riocm_interface); 2356 class_interface_unregister(&rio_mport_interface); 2357 idr_destroy(&ch_idr); 2358 2359 device_unregister(riocm_cdev.dev); 2360 cdev_del(&(riocm_cdev.cdev)); 2361 2362 class_destroy(dev_class); 2363 unregister_chrdev_region(dev_number, 1); 2364 } 2365 2366 late_initcall(riocm_init); 2367 module_exit(riocm_exit); 2368 2369 2370 2371 2372 2373 /* LDV_COMMENT_BEGIN_MAIN */ 2374 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful 2375 2376 /*###########################################################################*/ 2377 2378 /*############## Driver Environment Generator 0.2 output ####################*/ 2379 2380 /*###########################################################################*/ 2381 2382 2383 2384 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */ 2385 void ldv_check_final_state(void); 2386 2387 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */ 2388 void ldv_check_return_value(int res); 2389 2390 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */ 2391 void ldv_check_return_value_probe(int res); 2392 2393 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 2394 void ldv_initialize(void); 2395 2396 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 2397 void ldv_handler_precall(void); 2398 2399 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */ 2400 int nondet_int(void); 2401 2402 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */ 2403 int LDV_IN_INTERRUPT; 2404 2405 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. 
*/ 2406 void ldv_main0_sequence_infinite_withcheck_stateful(void) { 2407 2408 2409 2410 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 2411 /*============================= VARIABLE DECLARATION PART =============================*/ 2412 /** STRUCT: struct type: file_operations, struct name: riocm_cdev_fops **/ 2413 /* content: static int riocm_cdev_open(struct inode *inode, struct file *filp)*/ 2414 /* LDV_COMMENT_BEGIN_PREP */ 2415 #define DRV_NAME "rio_cm" 2416 #define DRV_VERSION "1.0.0" 2417 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 2418 #define DRV_DESC "RapidIO Channelized Messaging Driver" 2419 #define DEV_NAME "rio_cm" 2420 #ifdef DEBUG 2421 #define riocm_debug(level, fmt, arg...) \ 2422 do { \ 2423 if (DBG_##level & dbg_level) \ 2424 pr_debug(DRV_NAME ": %s " fmt "\n", \ 2425 __func__, ##arg); \ 2426 } while (0) 2427 #else 2428 #define riocm_debug(level, fmt, arg...) \ 2429 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 2430 #endif 2431 #define riocm_warn(fmt, arg...) \ 2432 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 2433 #define riocm_error(fmt, arg...) \ 2434 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 2435 #ifdef DEBUG 2436 #endif 2437 #define RIOCM_TX_RING_SIZE 128 2438 #define RIOCM_RX_RING_SIZE 128 2439 #define RIOCM_CONNECT_TO 3 2440 #define RIOCM_MAX_CHNUM 0xffff 2441 #define RIOCM_CHNUM_AUTO 0 2442 #define RIOCM_MAX_EP_COUNT 0x10000 2443 #define RIO_HDR_LETTER_MASK 0xffff0000 2444 #define RIO_HDR_MBOX_MASK 0x0000ffff 2445 #define is_msg_capable(src_ops, dst_ops) \ 2446 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 2447 (dst_ops & RIO_DST_OPS_DATA_MSG)) 2448 #define dev_cm_capable(dev) \ 2449 is_msg_capable(dev->src_ops, dev->dst_ops) 2450 /* LDV_COMMENT_END_PREP */ 2451 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_cdev_open" */ 2452 struct inode * var_group1; 2453 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_cdev_open" */ 2454 struct file * var_group2; 2455 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "riocm_cdev_open" */ 2456 static int res_riocm_cdev_open_32; 2457 /* content: static int riocm_cdev_release(struct inode *inode, struct file *filp)*/ 2458 /* LDV_COMMENT_BEGIN_PREP */ 2459 #define DRV_NAME "rio_cm" 2460 #define DRV_VERSION "1.0.0" 2461 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 2462 #define DRV_DESC "RapidIO Channelized Messaging Driver" 2463 #define DEV_NAME "rio_cm" 2464 #ifdef DEBUG 2465 #define riocm_debug(level, fmt, arg...) \ 2466 do { \ 2467 if (DBG_##level & dbg_level) \ 2468 pr_debug(DRV_NAME ": %s " fmt "\n", \ 2469 __func__, ##arg); \ 2470 } while (0) 2471 #else 2472 #define riocm_debug(level, fmt, arg...) \ 2473 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 2474 #endif 2475 #define riocm_warn(fmt, arg...) \ 2476 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 2477 #define riocm_error(fmt, arg...) 
\ 2478 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 2479 #ifdef DEBUG 2480 #endif 2481 #define RIOCM_TX_RING_SIZE 128 2482 #define RIOCM_RX_RING_SIZE 128 2483 #define RIOCM_CONNECT_TO 3 2484 #define RIOCM_MAX_CHNUM 0xffff 2485 #define RIOCM_CHNUM_AUTO 0 2486 #define RIOCM_MAX_EP_COUNT 0x10000 2487 #define RIO_HDR_LETTER_MASK 0xffff0000 2488 #define RIO_HDR_MBOX_MASK 0x0000ffff 2489 #define is_msg_capable(src_ops, dst_ops) \ 2490 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 2491 (dst_ops & RIO_DST_OPS_DATA_MSG)) 2492 #define dev_cm_capable(dev) \ 2493 is_msg_capable(dev->src_ops, dev->dst_ops) 2494 /* LDV_COMMENT_END_PREP */ 2495 /* content: static long riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)*/ 2496 /* LDV_COMMENT_BEGIN_PREP */ 2497 #define DRV_NAME "rio_cm" 2498 #define DRV_VERSION "1.0.0" 2499 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 2500 #define DRV_DESC "RapidIO Channelized Messaging Driver" 2501 #define DEV_NAME "rio_cm" 2502 #ifdef DEBUG 2503 #define riocm_debug(level, fmt, arg...) \ 2504 do { \ 2505 if (DBG_##level & dbg_level) \ 2506 pr_debug(DRV_NAME ": %s " fmt "\n", \ 2507 __func__, ##arg); \ 2508 } while (0) 2509 #else 2510 #define riocm_debug(level, fmt, arg...) \ 2511 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 2512 #endif 2513 #define riocm_warn(fmt, arg...) \ 2514 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 2515 #define riocm_error(fmt, arg...) \ 2516 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 2517 #ifdef DEBUG 2518 #endif 2519 #define RIOCM_TX_RING_SIZE 128 2520 #define RIOCM_RX_RING_SIZE 128 2521 #define RIOCM_CONNECT_TO 3 2522 #define RIOCM_MAX_CHNUM 0xffff 2523 #define RIOCM_CHNUM_AUTO 0 2524 #define RIOCM_MAX_EP_COUNT 0x10000 2525 #define RIO_HDR_LETTER_MASK 0xffff0000 2526 #define RIO_HDR_MBOX_MASK 0x0000ffff 2527 #define is_msg_capable(src_ops, dst_ops) \ 2528 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 2529 (dst_ops & RIO_DST_OPS_DATA_MSG)) 2530 #define dev_cm_capable(dev) \ 2531 is_msg_capable(dev->src_ops, dev->dst_ops) 2532 /* LDV_COMMENT_END_PREP */ 2533 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_cdev_ioctl" */ 2534 unsigned int var_riocm_cdev_ioctl_45_p1; 2535 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_cdev_ioctl" */ 2536 unsigned long var_riocm_cdev_ioctl_45_p2; 2537 2538 /** STRUCT: struct type: subsys_interface, struct name: riocm_interface **/ 2539 /* content: static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)*/ 2540 /* LDV_COMMENT_BEGIN_PREP */ 2541 #define DRV_NAME "rio_cm" 2542 #define DRV_VERSION "1.0.0" 2543 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 2544 #define DRV_DESC "RapidIO Channelized Messaging Driver" 2545 #define DEV_NAME "rio_cm" 2546 #ifdef DEBUG 2547 #define riocm_debug(level, fmt, arg...) \ 2548 do { \ 2549 if (DBG_##level & dbg_level) \ 2550 pr_debug(DRV_NAME ": %s " fmt "\n", \ 2551 __func__, ##arg); \ 2552 } while (0) 2553 #else 2554 #define riocm_debug(level, fmt, arg...) \ 2555 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 2556 #endif 2557 #define riocm_warn(fmt, arg...) \ 2558 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 2559 #define riocm_error(fmt, arg...) 
\ 2560 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 2561 #ifdef DEBUG 2562 #endif 2563 #define RIOCM_TX_RING_SIZE 128 2564 #define RIOCM_RX_RING_SIZE 128 2565 #define RIOCM_CONNECT_TO 3 2566 #define RIOCM_MAX_CHNUM 0xffff 2567 #define RIOCM_CHNUM_AUTO 0 2568 #define RIOCM_MAX_EP_COUNT 0x10000 2569 #define RIO_HDR_LETTER_MASK 0xffff0000 2570 #define RIO_HDR_MBOX_MASK 0x0000ffff 2571 #define is_msg_capable(src_ops, dst_ops) \ 2572 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 2573 (dst_ops & RIO_DST_OPS_DATA_MSG)) 2574 #define dev_cm_capable(dev) \ 2575 is_msg_capable(dev->src_ops, dev->dst_ops) 2576 /* LDV_COMMENT_END_PREP */ 2577 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_add_dev" */ 2578 struct device * var_group3; 2579 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_add_dev" */ 2580 struct subsys_interface * var_group4; 2581 /* content: static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)*/ 2582 /* LDV_COMMENT_BEGIN_PREP */ 2583 #define DRV_NAME "rio_cm" 2584 #define DRV_VERSION "1.0.0" 2585 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 2586 #define DRV_DESC "RapidIO Channelized Messaging Driver" 2587 #define DEV_NAME "rio_cm" 2588 #ifdef DEBUG 2589 #define riocm_debug(level, fmt, arg...) \ 2590 do { \ 2591 if (DBG_##level & dbg_level) \ 2592 pr_debug(DRV_NAME ": %s " fmt "\n", \ 2593 __func__, ##arg); \ 2594 } while (0) 2595 #else 2596 #define riocm_debug(level, fmt, arg...) \ 2597 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 2598 #endif 2599 #define riocm_warn(fmt, arg...) \ 2600 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 2601 #define riocm_error(fmt, arg...) \ 2602 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 2603 #ifdef DEBUG 2604 #endif 2605 #define RIOCM_TX_RING_SIZE 128 2606 #define RIOCM_RX_RING_SIZE 128 2607 #define RIOCM_CONNECT_TO 3 2608 #define RIOCM_MAX_CHNUM 0xffff 2609 #define RIOCM_CHNUM_AUTO 0 2610 #define RIOCM_MAX_EP_COUNT 0x10000 2611 #define RIO_HDR_LETTER_MASK 0xffff0000 2612 #define RIO_HDR_MBOX_MASK 0x0000ffff 2613 #define is_msg_capable(src_ops, dst_ops) \ 2614 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 2615 (dst_ops & RIO_DST_OPS_DATA_MSG)) 2616 #define dev_cm_capable(dev) \ 2617 is_msg_capable(dev->src_ops, dev->dst_ops) 2618 /* LDV_COMMENT_END_PREP */ 2619 2620 /** STRUCT: struct type: notifier_block, struct name: rio_cm_notifier **/ 2621 /* content: static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code, void *unused)*/ 2622 /* LDV_COMMENT_BEGIN_PREP */ 2623 #define DRV_NAME "rio_cm" 2624 #define DRV_VERSION "1.0.0" 2625 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 2626 #define DRV_DESC "RapidIO Channelized Messaging Driver" 2627 #define DEV_NAME "rio_cm" 2628 #ifdef DEBUG 2629 #define riocm_debug(level, fmt, arg...) \ 2630 do { \ 2631 if (DBG_##level & dbg_level) \ 2632 pr_debug(DRV_NAME ": %s " fmt "\n", \ 2633 __func__, ##arg); \ 2634 } while (0) 2635 #else 2636 #define riocm_debug(level, fmt, arg...) \ 2637 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 2638 #endif 2639 #define riocm_warn(fmt, arg...) \ 2640 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 2641 #define riocm_error(fmt, arg...) 
\ 2642 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 2643 #ifdef DEBUG 2644 #endif 2645 #define RIOCM_TX_RING_SIZE 128 2646 #define RIOCM_RX_RING_SIZE 128 2647 #define RIOCM_CONNECT_TO 3 2648 #define RIOCM_MAX_CHNUM 0xffff 2649 #define RIOCM_CHNUM_AUTO 0 2650 #define RIOCM_MAX_EP_COUNT 0x10000 2651 #define RIO_HDR_LETTER_MASK 0xffff0000 2652 #define RIO_HDR_MBOX_MASK 0x0000ffff 2653 #define is_msg_capable(src_ops, dst_ops) \ 2654 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 2655 (dst_ops & RIO_DST_OPS_DATA_MSG)) 2656 #define dev_cm_capable(dev) \ 2657 is_msg_capable(dev->src_ops, dev->dst_ops) 2658 /* LDV_COMMENT_END_PREP */ 2659 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "rio_cm_shutdown" */ 2660 struct notifier_block * var_group5; 2661 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "rio_cm_shutdown" */ 2662 unsigned long var_rio_cm_shutdown_51_p1; 2663 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "rio_cm_shutdown" */ 2664 void * var_rio_cm_shutdown_51_p2; 2665 2666 2667 2668 2669 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */ 2670 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */ 2671 /*============================= VARIABLE INITIALIZING PART =============================*/ 2672 LDV_IN_INTERRUPT=1; 2673 2674 2675 2676 2677 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */ 2678 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */ 2679 /*============================= FUNCTION CALL SECTION =============================*/ 2680 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */ 2681 ldv_initialize(); 2682 int ldv_s_riocm_cdev_fops_file_operations = 0; 2683 2684 2685 2686 2687 2688 2689 2690 while( nondet_int() 2691 || !(ldv_s_riocm_cdev_fops_file_operations == 0) 2692 ) { 2693 2694 switch(nondet_int()) { 2695 2696 case 0: { 2697 2698 /** STRUCT: struct type: file_operations, struct name: riocm_cdev_fops **/ 2699 if(ldv_s_riocm_cdev_fops_file_operations==0) { 2700 2701 /* content: static int riocm_cdev_open(struct inode *inode, struct file *filp)*/ 2702 /* LDV_COMMENT_BEGIN_PREP */ 2703 #define DRV_NAME "rio_cm" 2704 #define DRV_VERSION "1.0.0" 2705 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 2706 #define DRV_DESC "RapidIO Channelized Messaging Driver" 2707 #define DEV_NAME "rio_cm" 2708 #ifdef DEBUG 2709 #define riocm_debug(level, fmt, arg...) \ 2710 do { \ 2711 if (DBG_##level & dbg_level) \ 2712 pr_debug(DRV_NAME ": %s " fmt "\n", \ 2713 __func__, ##arg); \ 2714 } while (0) 2715 #else 2716 #define riocm_debug(level, fmt, arg...) \ 2717 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 2718 #endif 2719 #define riocm_warn(fmt, arg...) \ 2720 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 2721 #define riocm_error(fmt, arg...) \ 2722 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 2723 #ifdef DEBUG 2724 #endif 2725 #define RIOCM_TX_RING_SIZE 128 2726 #define RIOCM_RX_RING_SIZE 128 2727 #define RIOCM_CONNECT_TO 3 2728 #define RIOCM_MAX_CHNUM 0xffff 2729 #define RIOCM_CHNUM_AUTO 0 2730 #define RIOCM_MAX_EP_COUNT 0x10000 2731 #define RIO_HDR_LETTER_MASK 0xffff0000 2732 #define RIO_HDR_MBOX_MASK 0x0000ffff 2733 #define is_msg_capable(src_ops, dst_ops) \ 2734 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 2735 (dst_ops & RIO_DST_OPS_DATA_MSG)) 2736 #define dev_cm_capable(dev) \ 2737 is_msg_capable(dev->src_ops, dev->dst_ops) 2738 /* LDV_COMMENT_END_PREP */ 2739 /* LDV_COMMENT_FUNCTION_CALL Function from field "open" from driver structure with callbacks "riocm_cdev_fops". 
Standard function test for correct return result. */ 2740 ldv_handler_precall(); 2741 res_riocm_cdev_open_32 = riocm_cdev_open( var_group1, var_group2); 2742 ldv_check_return_value(res_riocm_cdev_open_32); 2743 if(res_riocm_cdev_open_32) 2744 goto ldv_module_exit; 2745 ldv_s_riocm_cdev_fops_file_operations++; 2746 2747 } 2748 2749 } 2750 2751 break; 2752 case 1: { 2753 2754 /** STRUCT: struct type: file_operations, struct name: riocm_cdev_fops **/ 2755 if(ldv_s_riocm_cdev_fops_file_operations==1) { 2756 2757 /* content: static int riocm_cdev_release(struct inode *inode, struct file *filp)*/ 2758 /* LDV_COMMENT_BEGIN_PREP */ 2759 #define DRV_NAME "rio_cm" 2760 #define DRV_VERSION "1.0.0" 2761 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 2762 #define DRV_DESC "RapidIO Channelized Messaging Driver" 2763 #define DEV_NAME "rio_cm" 2764 #ifdef DEBUG 2765 #define riocm_debug(level, fmt, arg...) \ 2766 do { \ 2767 if (DBG_##level & dbg_level) \ 2768 pr_debug(DRV_NAME ": %s " fmt "\n", \ 2769 __func__, ##arg); \ 2770 } while (0) 2771 #else 2772 #define riocm_debug(level, fmt, arg...) \ 2773 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 2774 #endif 2775 #define riocm_warn(fmt, arg...) \ 2776 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 2777 #define riocm_error(fmt, arg...) \ 2778 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 2779 #ifdef DEBUG 2780 #endif 2781 #define RIOCM_TX_RING_SIZE 128 2782 #define RIOCM_RX_RING_SIZE 128 2783 #define RIOCM_CONNECT_TO 3 2784 #define RIOCM_MAX_CHNUM 0xffff 2785 #define RIOCM_CHNUM_AUTO 0 2786 #define RIOCM_MAX_EP_COUNT 0x10000 2787 #define RIO_HDR_LETTER_MASK 0xffff0000 2788 #define RIO_HDR_MBOX_MASK 0x0000ffff 2789 #define is_msg_capable(src_ops, dst_ops) \ 2790 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 2791 (dst_ops & RIO_DST_OPS_DATA_MSG)) 2792 #define dev_cm_capable(dev) \ 2793 is_msg_capable(dev->src_ops, dev->dst_ops) 2794 /* LDV_COMMENT_END_PREP */ 2795 /* LDV_COMMENT_FUNCTION_CALL Function from field "release" from driver structure with callbacks "riocm_cdev_fops" */ 2796 ldv_handler_precall(); 2797 riocm_cdev_release( var_group1, var_group2); 2798 ldv_s_riocm_cdev_fops_file_operations=0; 2799 2800 } 2801 2802 } 2803 2804 break; 2805 case 2: { 2806 2807 /** STRUCT: struct type: file_operations, struct name: riocm_cdev_fops **/ 2808 2809 2810 /* content: static long riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)*/ 2811 /* LDV_COMMENT_BEGIN_PREP */ 2812 #define DRV_NAME "rio_cm" 2813 #define DRV_VERSION "1.0.0" 2814 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 2815 #define DRV_DESC "RapidIO Channelized Messaging Driver" 2816 #define DEV_NAME "rio_cm" 2817 #ifdef DEBUG 2818 #define riocm_debug(level, fmt, arg...) \ 2819 do { \ 2820 if (DBG_##level & dbg_level) \ 2821 pr_debug(DRV_NAME ": %s " fmt "\n", \ 2822 __func__, ##arg); \ 2823 } while (0) 2824 #else 2825 #define riocm_debug(level, fmt, arg...) \ 2826 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 2827 #endif 2828 #define riocm_warn(fmt, arg...) \ 2829 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 2830 #define riocm_error(fmt, arg...) 
\ 2831 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 2832 #ifdef DEBUG 2833 #endif 2834 #define RIOCM_TX_RING_SIZE 128 2835 #define RIOCM_RX_RING_SIZE 128 2836 #define RIOCM_CONNECT_TO 3 2837 #define RIOCM_MAX_CHNUM 0xffff 2838 #define RIOCM_CHNUM_AUTO 0 2839 #define RIOCM_MAX_EP_COUNT 0x10000 2840 #define RIO_HDR_LETTER_MASK 0xffff0000 2841 #define RIO_HDR_MBOX_MASK 0x0000ffff 2842 #define is_msg_capable(src_ops, dst_ops) \ 2843 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 2844 (dst_ops & RIO_DST_OPS_DATA_MSG)) 2845 #define dev_cm_capable(dev) \ 2846 is_msg_capable(dev->src_ops, dev->dst_ops) 2847 /* LDV_COMMENT_END_PREP */ 2848 /* LDV_COMMENT_FUNCTION_CALL Function from field "unlocked_ioctl" from driver structure with callbacks "riocm_cdev_fops" */ 2849 ldv_handler_precall(); 2850 riocm_cdev_ioctl( var_group2, var_riocm_cdev_ioctl_45_p1, var_riocm_cdev_ioctl_45_p2); 2851 2852 2853 2854 2855 } 2856 2857 break; 2858 case 3: { 2859 2860 /** STRUCT: struct type: subsys_interface, struct name: riocm_interface **/ 2861 2862 2863 /* content: static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)*/ 2864 /* LDV_COMMENT_BEGIN_PREP */ 2865 #define DRV_NAME "rio_cm" 2866 #define DRV_VERSION "1.0.0" 2867 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 2868 #define DRV_DESC "RapidIO Channelized Messaging Driver" 2869 #define DEV_NAME "rio_cm" 2870 #ifdef DEBUG 2871 #define riocm_debug(level, fmt, arg...) \ 2872 do { \ 2873 if (DBG_##level & dbg_level) \ 2874 pr_debug(DRV_NAME ": %s " fmt "\n", \ 2875 __func__, ##arg); \ 2876 } while (0) 2877 #else 2878 #define riocm_debug(level, fmt, arg...) \ 2879 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 2880 #endif 2881 #define riocm_warn(fmt, arg...) \ 2882 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 2883 #define riocm_error(fmt, arg...) \ 2884 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 2885 #ifdef DEBUG 2886 #endif 2887 #define RIOCM_TX_RING_SIZE 128 2888 #define RIOCM_RX_RING_SIZE 128 2889 #define RIOCM_CONNECT_TO 3 2890 #define RIOCM_MAX_CHNUM 0xffff 2891 #define RIOCM_CHNUM_AUTO 0 2892 #define RIOCM_MAX_EP_COUNT 0x10000 2893 #define RIO_HDR_LETTER_MASK 0xffff0000 2894 #define RIO_HDR_MBOX_MASK 0x0000ffff 2895 #define is_msg_capable(src_ops, dst_ops) \ 2896 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 2897 (dst_ops & RIO_DST_OPS_DATA_MSG)) 2898 #define dev_cm_capable(dev) \ 2899 is_msg_capable(dev->src_ops, dev->dst_ops) 2900 /* LDV_COMMENT_END_PREP */ 2901 /* LDV_COMMENT_FUNCTION_CALL Function from field "add_dev" from driver structure with callbacks "riocm_interface" */ 2902 ldv_handler_precall(); 2903 riocm_add_dev( var_group3, var_group4); 2904 2905 2906 2907 2908 } 2909 2910 break; 2911 case 4: { 2912 2913 /** STRUCT: struct type: subsys_interface, struct name: riocm_interface **/ 2914 2915 2916 /* content: static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)*/ 2917 /* LDV_COMMENT_BEGIN_PREP */ 2918 #define DRV_NAME "rio_cm" 2919 #define DRV_VERSION "1.0.0" 2920 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 2921 #define DRV_DESC "RapidIO Channelized Messaging Driver" 2922 #define DEV_NAME "rio_cm" 2923 #ifdef DEBUG 2924 #define riocm_debug(level, fmt, arg...) \ 2925 do { \ 2926 if (DBG_##level & dbg_level) \ 2927 pr_debug(DRV_NAME ": %s " fmt "\n", \ 2928 __func__, ##arg); \ 2929 } while (0) 2930 #else 2931 #define riocm_debug(level, fmt, arg...) 
\ 2932 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 2933 #endif 2934 #define riocm_warn(fmt, arg...) \ 2935 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 2936 #define riocm_error(fmt, arg...) \ 2937 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 2938 #ifdef DEBUG 2939 #endif 2940 #define RIOCM_TX_RING_SIZE 128 2941 #define RIOCM_RX_RING_SIZE 128 2942 #define RIOCM_CONNECT_TO 3 2943 #define RIOCM_MAX_CHNUM 0xffff 2944 #define RIOCM_CHNUM_AUTO 0 2945 #define RIOCM_MAX_EP_COUNT 0x10000 2946 #define RIO_HDR_LETTER_MASK 0xffff0000 2947 #define RIO_HDR_MBOX_MASK 0x0000ffff 2948 #define is_msg_capable(src_ops, dst_ops) \ 2949 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 2950 (dst_ops & RIO_DST_OPS_DATA_MSG)) 2951 #define dev_cm_capable(dev) \ 2952 is_msg_capable(dev->src_ops, dev->dst_ops) 2953 /* LDV_COMMENT_END_PREP */ 2954 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove_dev" from driver structure with callbacks "riocm_interface" */ 2955 ldv_handler_precall(); 2956 riocm_remove_dev( var_group3, var_group4); 2957 2958 2959 2960 2961 } 2962 2963 break; 2964 case 5: { 2965 2966 /** STRUCT: struct type: notifier_block, struct name: rio_cm_notifier **/ 2967 2968 2969 /* content: static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code, void *unused)*/ 2970 /* LDV_COMMENT_BEGIN_PREP */ 2971 #define DRV_NAME "rio_cm" 2972 #define DRV_VERSION "1.0.0" 2973 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 2974 #define DRV_DESC "RapidIO Channelized Messaging Driver" 2975 #define DEV_NAME "rio_cm" 2976 #ifdef DEBUG 2977 #define riocm_debug(level, fmt, arg...) \ 2978 do { \ 2979 if (DBG_##level & dbg_level) \ 2980 pr_debug(DRV_NAME ": %s " fmt "\n", \ 2981 __func__, ##arg); \ 2982 } while (0) 2983 #else 2984 #define riocm_debug(level, fmt, arg...) \ 2985 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 2986 #endif 2987 #define riocm_warn(fmt, arg...) \ 2988 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 2989 #define riocm_error(fmt, arg...) \ 2990 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 2991 #ifdef DEBUG 2992 #endif 2993 #define RIOCM_TX_RING_SIZE 128 2994 #define RIOCM_RX_RING_SIZE 128 2995 #define RIOCM_CONNECT_TO 3 2996 #define RIOCM_MAX_CHNUM 0xffff 2997 #define RIOCM_CHNUM_AUTO 0 2998 #define RIOCM_MAX_EP_COUNT 0x10000 2999 #define RIO_HDR_LETTER_MASK 0xffff0000 3000 #define RIO_HDR_MBOX_MASK 0x0000ffff 3001 #define is_msg_capable(src_ops, dst_ops) \ 3002 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 3003 (dst_ops & RIO_DST_OPS_DATA_MSG)) 3004 #define dev_cm_capable(dev) \ 3005 is_msg_capable(dev->src_ops, dev->dst_ops) 3006 /* LDV_COMMENT_END_PREP */ 3007 /* LDV_COMMENT_FUNCTION_CALL Function from field "notifier_call" from driver structure with callbacks "rio_cm_notifier" */ 3008 ldv_handler_precall(); 3009 rio_cm_shutdown( var_group5, var_rio_cm_shutdown_51_p1, var_rio_cm_shutdown_51_p2); 3010 3011 3012 3013 3014 } 3015 3016 break; 3017 default: break; 3018 3019 } 3020 3021 } 3022 3023 ldv_module_exit: 3024 3025 /** INIT: init_type: ST_MODULE_EXIT **/ 3026 /* content: static void __exit riocm_exit(void)*/ 3027 /* LDV_COMMENT_BEGIN_PREP */ 3028 #define DRV_NAME "rio_cm" 3029 #define DRV_VERSION "1.0.0" 3030 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 3031 #define DRV_DESC "RapidIO Channelized Messaging Driver" 3032 #define DEV_NAME "rio_cm" 3033 #ifdef DEBUG 3034 #define riocm_debug(level, fmt, arg...) 
\ 3035 do { \ 3036 if (DBG_##level & dbg_level) \ 3037 pr_debug(DRV_NAME ": %s " fmt "\n", \ 3038 __func__, ##arg); \ 3039 } while (0) 3040 #else 3041 #define riocm_debug(level, fmt, arg...) \ 3042 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 3043 #endif 3044 #define riocm_warn(fmt, arg...) \ 3045 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 3046 #define riocm_error(fmt, arg...) \ 3047 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 3048 #ifdef DEBUG 3049 #endif 3050 #define RIOCM_TX_RING_SIZE 128 3051 #define RIOCM_RX_RING_SIZE 128 3052 #define RIOCM_CONNECT_TO 3 3053 #define RIOCM_MAX_CHNUM 0xffff 3054 #define RIOCM_CHNUM_AUTO 0 3055 #define RIOCM_MAX_EP_COUNT 0x10000 3056 #define RIO_HDR_LETTER_MASK 0xffff0000 3057 #define RIO_HDR_MBOX_MASK 0x0000ffff 3058 #define is_msg_capable(src_ops, dst_ops) \ 3059 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 3060 (dst_ops & RIO_DST_OPS_DATA_MSG)) 3061 #define dev_cm_capable(dev) \ 3062 is_msg_capable(dev->src_ops, dev->dst_ops) 3063 /* LDV_COMMENT_END_PREP */ 3064 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */ 3065 ldv_handler_precall(); 3066 riocm_exit(); 3067 3068 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */ 3069 ldv_final: ldv_check_final_state(); 3070 3071 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */ 3072 return; 3073 3074 } 3075 #endif 3076 3077 /* LDV_COMMENT_END_MAIN */
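/*
 * Editor's note: the generated main above is dominated by repeated
 * LDV_COMMENT_BEGIN_PREP blocks, which obscures the shape of the environment
 * model. The sketch below condenses the same pattern, a nondeterministic
 * callback dispatcher with a state flag enforcing the open-before-release
 * protocol, using only the LDV stubs declared above. It is an editorial
 * illustration of what the Driver Environment Generator emits, not generator
 * output; the guard macro LDV_MAIN0_SKETCH is hypothetical, so the sketch is
 * never compiled into the verification task.
 */
#ifdef LDV_MAIN0_SKETCH
void ldv_main0_sketch(void)
{
	struct inode *inode;	/* deliberately left undefined: the verifier */
	struct file *filp;	/* treats them as arbitrary values           */
	int opened = 0;		/* tracks the open/release protocol state    */

	LDV_IN_INTERRUPT = 1;
	ldv_initialize();

	while (nondet_int() || opened) {
		switch (nondet_int()) {
		case 0:		/* fops->open */
			if (!opened) {
				ldv_handler_precall();
				if (riocm_cdev_open(inode, filp))
					goto ldv_exit;	/* open failed */
				opened = 1;
			}
			break;
		case 1:		/* fops->release, legal only after open */
			if (opened) {
				ldv_handler_precall();
				riocm_cdev_release(inode, filp);
				opened = 0;
			}
			break;
		default:	/* remaining callbacks: ioctl, add_dev, ... */
			break;
		}
	}
ldv_exit:
	ldv_handler_precall();
	riocm_exit();
	ldv_check_final_state();
}
#endif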
1 2 #include <linux/kernel.h> 3 bool ldv_is_err(const void *ptr); 4 bool ldv_is_err_or_null(const void *ptr); 5 void* ldv_err_ptr(long error); 6 long ldv_ptr_err(const void *ptr); 7 8 extern void ldv_spin_lock(void); 9 extern void ldv_spin_unlock(void); 10 extern int ldv_spin_trylock(void); 11 12 #include <linux/kernel.h> 13 #include <verifier/rcv.h> 14 #include <linux/module.h> 15 #include <linux/slab.h> 16 17 extern void *ldv_undefined_pointer(void); 18 extern void ldv_check_alloc_flags(gfp_t flags); 19 extern void ldv_check_alloc_nonatomic(void); 20 /* Returns an arbitrary page in addition to checking flags */ 21 extern struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags); 22 #line 1 "/home/ubuntu/launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--43_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/7763/dscv_tempdir/dscv/ri/43_1a/drivers/rapidio/rio_cm.c" 23 24 /* 25 * rio_cm - RapidIO Channelized Messaging Driver 26 * 27 * Copyright 2013-2016 Integrated Device Technology, Inc. 28 * Copyright (c) 2015, Prodrive Technologies 29 * Copyright (c) 2015, RapidIO Trade Association 30 * 31 * This program is free software; you can redistribute it and/or modify it 32 * under the terms of the GNU General Public License as published by the 33 * Free Software Foundation; either version 2 of the License, or (at your 34 * option) any later version. 35 * 36 * THIS PROGRAM IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL, 37 * BUT WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED WARRANTY OF 38 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE 39 * GNU GENERAL PUBLIC LICENSE FOR MORE DETAILS. 40 */ 41 42 #include <linux/module.h> 43 #include <linux/kernel.h> 44 #include <linux/dma-mapping.h> 45 #include <linux/delay.h> 46 #include <linux/sched.h> 47 #include <linux/rio.h> 48 #include <linux/rio_drv.h> 49 #include <linux/slab.h> 50 #include <linux/idr.h> 51 #include <linux/interrupt.h> 52 #include <linux/cdev.h> 53 #include <linux/fs.h> 54 #include <linux/poll.h> 55 #include <linux/reboot.h> 56 #include <linux/bitops.h> 57 #include <linux/printk.h> 58 #include <linux/rio_cm_cdev.h> 59 60 #define DRV_NAME "rio_cm" 61 #define DRV_VERSION "1.0.0" 62 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 63 #define DRV_DESC "RapidIO Channelized Messaging Driver" 64 #define DEV_NAME "rio_cm" 65 66 /* Debug output filtering masks */ 67 enum { 68 DBG_NONE = 0, 69 DBG_INIT = BIT(0), /* driver init */ 70 DBG_EXIT = BIT(1), /* driver exit */ 71 DBG_MPORT = BIT(2), /* mport add/remove */ 72 DBG_RDEV = BIT(3), /* RapidIO device add/remove */ 73 DBG_CHOP = BIT(4), /* channel operations */ 74 DBG_WAIT = BIT(5), /* waiting for events */ 75 DBG_TX = BIT(6), /* message TX */ 76 DBG_TX_EVENT = BIT(7), /* message TX event */ 77 DBG_RX_DATA = BIT(8), /* inbound data messages */ 78 DBG_RX_CMD = BIT(9), /* inbound REQ/ACK/NACK messages */ 79 DBG_ALL = ~0, 80 }; 81 82 #ifdef DEBUG 83 #define riocm_debug(level, fmt, arg...) \ 84 do { \ 85 if (DBG_##level & dbg_level) \ 86 pr_debug(DRV_NAME ": %s " fmt "\n", \ 87 __func__, ##arg); \ 88 } while (0) 89 #else 90 #define riocm_debug(level, fmt, arg...) \ 91 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 92 #endif 93 94 #define riocm_warn(fmt, arg...) \ 95 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 96 97 #define riocm_error(fmt, arg...) 
\ 98 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 99 100 101 static int cmbox = 1; 102 module_param(cmbox, int, S_IRUGO); 103 MODULE_PARM_DESC(cmbox, "RapidIO Mailbox number (default 1)"); 104 105 static int chstart = 256; 106 module_param(chstart, int, S_IRUGO); 107 MODULE_PARM_DESC(chstart, 108 "Start channel number for dynamic allocation (default 256)"); 109 110 #ifdef DEBUG 111 static u32 dbg_level = DBG_NONE; 112 module_param(dbg_level, uint, S_IWUSR | S_IRUGO); 113 MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); 114 #endif 115 116 MODULE_AUTHOR(DRV_AUTHOR); 117 MODULE_DESCRIPTION(DRV_DESC); 118 MODULE_LICENSE("GPL"); 119 MODULE_VERSION(DRV_VERSION); 120 121 #define RIOCM_TX_RING_SIZE 128 122 #define RIOCM_RX_RING_SIZE 128 123 #define RIOCM_CONNECT_TO 3 /* connect response TO (in sec) */ 124 125 #define RIOCM_MAX_CHNUM 0xffff /* Use full range of u16 field */ 126 #define RIOCM_CHNUM_AUTO 0 127 #define RIOCM_MAX_EP_COUNT 0x10000 /* Max number of endpoints */ 128 129 enum rio_cm_state { 130 RIO_CM_IDLE, 131 RIO_CM_CONNECT, 132 RIO_CM_CONNECTED, 133 RIO_CM_DISCONNECT, 134 RIO_CM_CHAN_BOUND, 135 RIO_CM_LISTEN, 136 RIO_CM_DESTROYING, 137 }; 138 139 enum rio_cm_pkt_type { 140 RIO_CM_SYS = 0xaa, 141 RIO_CM_CHAN = 0x55, 142 }; 143 144 enum rio_cm_chop { 145 CM_CONN_REQ, 146 CM_CONN_ACK, 147 CM_CONN_CLOSE, 148 CM_DATA_MSG, 149 }; 150 151 struct rio_ch_base_bhdr { 152 u32 src_id; 153 u32 dst_id; 154 #define RIO_HDR_LETTER_MASK 0xffff0000 155 #define RIO_HDR_MBOX_MASK 0x0000ffff 156 u8 src_mbox; 157 u8 dst_mbox; 158 u8 type; 159 } __attribute__((__packed__)); 160 161 struct rio_ch_chan_hdr { 162 struct rio_ch_base_bhdr bhdr; 163 u8 ch_op; 164 u16 dst_ch; 165 u16 src_ch; 166 u16 msg_len; 167 u16 rsrvd; 168 } __attribute__((__packed__)); 169 170 struct tx_req { 171 struct list_head node; 172 struct rio_dev *rdev; 173 void *buffer; 174 size_t len; 175 }; 176 177 struct cm_dev { 178 struct list_head list; 179 struct rio_mport *mport; 180 void *rx_buf[RIOCM_RX_RING_SIZE]; 181 int rx_slots; 182 struct mutex rx_lock; 183 184 void *tx_buf[RIOCM_TX_RING_SIZE]; 185 int tx_slot; 186 int tx_cnt; 187 int tx_ack_slot; 188 struct list_head tx_reqs; 189 spinlock_t tx_lock; 190 191 struct list_head peers; 192 u32 npeers; 193 struct workqueue_struct *rx_wq; 194 struct work_struct rx_work; 195 }; 196 197 struct chan_rx_ring { 198 void *buf[RIOCM_RX_RING_SIZE]; 199 int head; 200 int tail; 201 int count; 202 203 /* Tracking RX buffers reported to upper level */ 204 void *inuse[RIOCM_RX_RING_SIZE]; 205 int inuse_cnt; 206 }; 207 208 struct rio_channel { 209 u16 id; /* local channel ID */ 210 struct kref ref; /* channel refcount */ 211 struct file *filp; 212 struct cm_dev *cmdev; /* associated CM device object */ 213 struct rio_dev *rdev; /* remote RapidIO device */ 214 enum rio_cm_state state; 215 int error; 216 spinlock_t lock; 217 void *context; 218 u32 loc_destid; /* local destID */ 219 u32 rem_destid; /* remote destID */ 220 u16 rem_channel; /* remote channel ID */ 221 struct list_head accept_queue; 222 struct list_head ch_node; 223 struct completion comp; 224 struct completion comp_close; 225 struct chan_rx_ring rx_ring; 226 }; 227 228 struct cm_peer { 229 struct list_head node; 230 struct rio_dev *rdev; 231 }; 232 233 struct rio_cm_work { 234 struct work_struct work; 235 struct cm_dev *cm; 236 void *data; 237 }; 238 239 struct conn_req { 240 struct list_head node; 241 u32 destid; /* requester destID */ 242 u16 chan; /* requester channel ID */ 243 struct cm_dev *cmdev; 244 }; 
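/*
 * Editor's note: both message headers above rely on
 * __attribute__((__packed__)) for a fixed on-wire layout: struct
 * rio_ch_base_bhdr occupies 4 + 4 + 1 + 1 + 1 = 11 bytes, and struct
 * rio_ch_chan_hdr appends ch_op at offset 11 plus four u16 fields for 20
 * bytes in total. A compile-time check of that assumption could look like
 * the sketch below (an editorial illustration, not part of the driver):
 */
static inline void riocm_assert_hdr_layout(void)
{
	BUILD_BUG_ON(sizeof(struct rio_ch_base_bhdr) != 11);
	BUILD_BUG_ON(offsetof(struct rio_ch_chan_hdr, ch_op) != 11);
	BUILD_BUG_ON(sizeof(struct rio_ch_chan_hdr) != 20);
}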
245 246 /* 247 * A channel_dev structure represents a CM_CDEV 248 * @cdev Character device 249 * @dev Associated device object 250 */ 251 struct channel_dev { 252 struct cdev cdev; 253 struct device *dev; 254 }; 255 256 static struct rio_channel *riocm_ch_alloc(u16 ch_num); 257 static void riocm_ch_free(struct kref *ref); 258 static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev, 259 void *buffer, size_t len); 260 static int riocm_ch_close(struct rio_channel *ch); 261 262 static DEFINE_SPINLOCK(idr_lock); 263 static DEFINE_IDR(ch_idr); 264 265 static LIST_HEAD(cm_dev_list); 266 static DECLARE_RWSEM(rdev_sem); 267 268 static struct class *dev_class; 269 static unsigned int dev_major; 270 static unsigned int dev_minor_base; 271 static dev_t dev_number; 272 static struct channel_dev riocm_cdev; 273 274 #define is_msg_capable(src_ops, dst_ops) \ 275 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 276 (dst_ops & RIO_DST_OPS_DATA_MSG)) 277 #define dev_cm_capable(dev) \ 278 is_msg_capable(dev->src_ops, dev->dst_ops) 279 280 static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp) 281 { 282 int ret; 283 284 spin_lock_bh(&ch->lock); 285 ret = (ch->state == cmp); 286 spin_unlock_bh(&ch->lock); 287 return ret; 288 } 289 290 static int riocm_cmp_exch(struct rio_channel *ch, 291 enum rio_cm_state cmp, enum rio_cm_state exch) 292 { 293 int ret; 294 295 spin_lock_bh(&ch->lock); 296 ret = (ch->state == cmp); 297 if (ret) 298 ch->state = exch; 299 spin_unlock_bh(&ch->lock); 300 return ret; 301 } 302 303 static enum rio_cm_state riocm_exch(struct rio_channel *ch, 304 enum rio_cm_state exch) 305 { 306 enum rio_cm_state old; 307 308 spin_lock_bh(&ch->lock); 309 old = ch->state; 310 ch->state = exch; 311 spin_unlock_bh(&ch->lock); 312 return old; 313 } 314 315 static struct rio_channel *riocm_get_channel(u16 nr) 316 { 317 struct rio_channel *ch; 318 319 spin_lock_bh(&idr_lock); 320 ch = idr_find(&ch_idr, nr); 321 if (ch) 322 kref_get(&ch->ref); 323 spin_unlock_bh(&idr_lock); 324 return ch; 325 } 326 327 static void riocm_put_channel(struct rio_channel *ch) 328 { 329 kref_put(&ch->ref, riocm_ch_free); 330 } 331 332 static void *riocm_rx_get_msg(struct cm_dev *cm) 333 { 334 void *msg; 335 int i; 336 337 msg = rio_get_inb_message(cm->mport, cmbox); 338 if (msg) { 339 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { 340 if (cm->rx_buf[i] == msg) { 341 cm->rx_buf[i] = NULL; 342 cm->rx_slots++; 343 break; 344 } 345 } 346 347 if (i == RIOCM_RX_RING_SIZE) 348 riocm_warn("no record for buffer 0x%p", msg); 349 } 350 351 return msg; 352 } 353 354 /* 355 * riocm_rx_fill - fills a ring of receive buffers for given cm device 356 * @cm: cm_dev object 357 * @nent: max number of entries to fill 358 * 359 * Returns: none 360 */ 361 static void riocm_rx_fill(struct cm_dev *cm, int nent) 362 { 363 int i; 364 365 if (cm->rx_slots == 0) 366 return; 367 368 for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) { 369 if (cm->rx_buf[i] == NULL) { 370 cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL); 371 if (cm->rx_buf[i] == NULL) 372 break; 373 rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]); 374 cm->rx_slots--; 375 nent--; 376 } 377 } 378 } 379 380 /* 381 * riocm_rx_free - frees all receive buffers associated with given cm device 382 * @cm: cm_dev object 383 * 384 * Returns: none 385 */ 386 static void riocm_rx_free(struct cm_dev *cm) 387 { 388 int i; 389 390 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { 391 if (cm->rx_buf[i] != NULL) { 392 kfree(cm->rx_buf[i]); 393 cm->rx_buf[i] = NULL; 394 } 395 } 396 } 
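/*
 * Editor's note: riocm_cmp(), riocm_cmp_exch() and riocm_exch() above form a
 * small locked state machine, while riocm_get_channel()/riocm_put_channel()
 * pair every lookup with a kref. The hypothetical helper below only
 * illustrates how callers are expected to combine the two protocols; the
 * real pattern appears later in riocm_ch_connect():
 */
static inline int riocm_example_claim_idle(u16 ch_num)
{
	struct rio_channel *ch = riocm_get_channel(ch_num); /* takes a kref */
	int ret = 0;

	if (!ch)
		return -ENODEV;

	/* Claim the channel only if it is still idle (atomic under ch->lock) */
	if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT))
		ret = -EINVAL;
	else
		/* ... work on the claimed channel, then roll the state back */
		riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE);

	riocm_put_channel(ch);	/* always drop the lookup reference */
	return ret;
}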
397 398 /* 399 * riocm_req_handler - connection request handler 400 * @cm: cm_dev object 401 * @req_data: pointer to the request packet 402 * 403 * Returns: 0 if success, or 404 * -EINVAL if channel is not in correct state, 405 * -ENODEV if cannot find a channel with specified ID, 406 * -ENOMEM if unable to allocate memory to store the request 407 */ 408 static int riocm_req_handler(struct cm_dev *cm, void *req_data) 409 { 410 struct rio_channel *ch; 411 struct conn_req *req; 412 struct rio_ch_chan_hdr *hh = req_data; 413 u16 chnum; 414 415 chnum = ntohs(hh->dst_ch); 416 417 ch = riocm_get_channel(chnum); 418 419 if (!ch) 420 return -ENODEV; 421 422 if (ch->state != RIO_CM_LISTEN) { 423 riocm_debug(RX_CMD, "channel %d is not in listen state", chnum); 424 riocm_put_channel(ch); 425 return -EINVAL; 426 } 427 428 req = kzalloc(sizeof(*req), GFP_KERNEL); 429 if (!req) { 430 riocm_put_channel(ch); 431 return -ENOMEM; 432 } 433 434 req->destid = ntohl(hh->bhdr.src_id); 435 req->chan = ntohs(hh->src_ch); 436 req->cmdev = cm; 437 438 spin_lock_bh(&ch->lock); 439 list_add_tail(&req->node, &ch->accept_queue); 440 spin_unlock_bh(&ch->lock); 441 complete(&ch->comp); 442 riocm_put_channel(ch); 443 444 return 0; 445 } 446 447 /* 448 * riocm_resp_handler - response to connection request handler 449 * @resp_data: pointer to the response packet 450 * 451 * Returns: 0 if success, or 452 * -EINVAL if channel is not in correct state, 453 * -ENODEV if cannot find a channel with specified ID, 454 */ 455 static int riocm_resp_handler(void *resp_data) 456 { 457 struct rio_channel *ch; 458 struct rio_ch_chan_hdr *hh = resp_data; 459 u16 chnum; 460 461 chnum = ntohs(hh->dst_ch); 462 ch = riocm_get_channel(chnum); 463 if (!ch) 464 return -ENODEV; 465 466 if (ch->state != RIO_CM_CONNECT) { 467 riocm_put_channel(ch); 468 return -EINVAL; 469 } 470 471 riocm_exch(ch, RIO_CM_CONNECTED); 472 ch->rem_channel = ntohs(hh->src_ch); 473 complete(&ch->comp); 474 riocm_put_channel(ch); 475 476 return 0; 477 } 478 479 /* 480 * riocm_close_handler - channel close request handler 481 * @req_data: pointer to the request packet 482 * 483 * Returns: 0 if success, or 484 * -ENODEV if cannot find a channel with specified ID, 485 * + error codes returned by riocm_ch_close. 
486 */ 487 static int riocm_close_handler(void *data) 488 { 489 struct rio_channel *ch; 490 struct rio_ch_chan_hdr *hh = data; 491 int ret; 492 493 riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch)); 494 495 spin_lock_bh(&idr_lock); 496 ch = idr_find(&ch_idr, ntohs(hh->dst_ch)); 497 if (!ch) { 498 spin_unlock_bh(&idr_lock); 499 return -ENODEV; 500 } 501 idr_remove(&ch_idr, ch->id); 502 spin_unlock_bh(&idr_lock); 503 504 riocm_exch(ch, RIO_CM_DISCONNECT); 505 506 ret = riocm_ch_close(ch); 507 if (ret) 508 riocm_debug(RX_CMD, "riocm_ch_close() returned %d", ret); 509 510 return 0; 511 } 512 513 /* 514 * rio_cm_handler - function that services request (non-data) packets 515 * @cm: cm_dev object 516 * @data: pointer to the packet 517 */ 518 static void rio_cm_handler(struct cm_dev *cm, void *data) 519 { 520 struct rio_ch_chan_hdr *hdr; 521 522 if (!rio_mport_is_running(cm->mport)) 523 goto out; 524 525 hdr = data; 526 527 riocm_debug(RX_CMD, "OP=%x for ch=%d from %d", 528 hdr->ch_op, ntohs(hdr->dst_ch), ntohs(hdr->src_ch)); 529 530 switch (hdr->ch_op) { 531 case CM_CONN_REQ: 532 riocm_req_handler(cm, data); 533 break; 534 case CM_CONN_ACK: 535 riocm_resp_handler(data); 536 break; 537 case CM_CONN_CLOSE: 538 riocm_close_handler(data); 539 break; 540 default: 541 riocm_error("Invalid packet header"); 542 break; 543 } 544 out: 545 kfree(data); 546 } 547 548 /* 549 * rio_rx_data_handler - received data packet handler 550 * @cm: cm_dev object 551 * @buf: data packet 552 * 553 * Returns: 0 if success, or 554 * -ENODEV if cannot find a channel with specified ID, 555 * -EIO if channel is not in CONNECTED state, 556 * -ENOMEM if channel RX queue is full (packet discarded) 557 */ 558 static int rio_rx_data_handler(struct cm_dev *cm, void *buf) 559 { 560 struct rio_ch_chan_hdr *hdr; 561 struct rio_channel *ch; 562 563 hdr = buf; 564 565 riocm_debug(RX_DATA, "for ch=%d", ntohs(hdr->dst_ch)); 566 567 ch = riocm_get_channel(ntohs(hdr->dst_ch)); 568 if (!ch) { 569 /* Discard data message for non-existing channel */ 570 kfree(buf); 571 return -ENODEV; 572 } 573 574 /* Place pointer to the buffer into channel's RX queue */ 575 spin_lock(&ch->lock); 576 577 if (ch->state != RIO_CM_CONNECTED) { 578 /* Channel is not ready to receive data, discard a packet */ 579 riocm_debug(RX_DATA, "ch=%d is in wrong state=%d", 580 ch->id, ch->state); 581 spin_unlock(&ch->lock); 582 kfree(buf); 583 riocm_put_channel(ch); 584 return -EIO; 585 } 586 587 if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) { 588 /* If RX ring is full, discard a packet */ 589 riocm_debug(RX_DATA, "ch=%d is full", ch->id); 590 spin_unlock(&ch->lock); 591 kfree(buf); 592 riocm_put_channel(ch); 593 return -ENOMEM; 594 } 595 596 ch->rx_ring.buf[ch->rx_ring.head] = buf; 597 ch->rx_ring.head++; 598 ch->rx_ring.count++; 599 ch->rx_ring.head %= RIOCM_RX_RING_SIZE; 600 601 complete(&ch->comp); 602 603 spin_unlock(&ch->lock); 604 riocm_put_channel(ch); 605 606 return 0; 607 } 608 609 /* 610 * rio_ibmsg_handler - inbound message packet handler 611 */ 612 static void rio_ibmsg_handler(struct work_struct *work) 613 { 614 struct cm_dev *cm = container_of(work, struct cm_dev, rx_work); 615 void *data; 616 struct rio_ch_chan_hdr *hdr; 617 618 if (!rio_mport_is_running(cm->mport)) 619 return; 620 621 while (1) { 622 mutex_lock(&cm->rx_lock); 623 data = riocm_rx_get_msg(cm); 624 if (data) 625 riocm_rx_fill(cm, 1); 626 mutex_unlock(&cm->rx_lock); 627 628 if (data == NULL) 629 break; 630 631 hdr = data; 632 633 if (hdr->bhdr.type != RIO_CM_CHAN) { 634 /* For now simply 
discard packets other than channel */ 635 riocm_error("Unsupported TYPE code (0x%x). Msg dropped", 636 hdr->bhdr.type); 637 kfree(data); 638 continue; 639 } 640 641 /* Process a channel message */ 642 if (hdr->ch_op == CM_DATA_MSG) 643 rio_rx_data_handler(cm, data); 644 else 645 rio_cm_handler(cm, data); 646 } 647 } 648 649 static void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id, 650 int mbox, int slot) 651 { 652 struct cm_dev *cm = dev_id; 653 654 if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work)) 655 queue_work(cm->rx_wq, &cm->rx_work); 656 } 657 658 /* 659 * rio_txcq_handler - TX completion handler 660 * @cm: cm_dev object 661 * @slot: TX queue slot 662 * 663 * TX completion handler also ensures that pending request packets are placed 664 * into transmit queue as soon as a free slot becomes available. This is done 665 * to give higher priority to request packets during high intensity data flow. 666 */ 667 static void rio_txcq_handler(struct cm_dev *cm, int slot) 668 { 669 int ack_slot; 670 671 /* ATTN: Add TX completion notification if/when direct buffer 672 * transfer is implemented. At this moment only correct tracking 673 * of tx_count is important. 674 */ 675 riocm_debug(TX_EVENT, "for mport_%d slot %d tx_cnt %d", 676 cm->mport->id, slot, cm->tx_cnt); 677 678 spin_lock(&cm->tx_lock); 679 ack_slot = cm->tx_ack_slot; 680 681 if (ack_slot == slot) 682 riocm_debug(TX_EVENT, "slot == ack_slot"); 683 684 while (cm->tx_cnt && ((ack_slot != slot) || 685 (cm->tx_cnt == RIOCM_TX_RING_SIZE))) { 686 687 cm->tx_buf[ack_slot] = NULL; 688 ++ack_slot; 689 ack_slot &= (RIOCM_TX_RING_SIZE - 1); 690 cm->tx_cnt--; 691 } 692 693 if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE) 694 riocm_error("tx_cnt %d out of sync", cm->tx_cnt); 695 696 WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE)); 697 698 cm->tx_ack_slot = ack_slot; 699 700 /* 701 * If there are pending requests, insert them into transmit queue 702 */ 703 if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) { 704 struct tx_req *req, *_req; 705 int rc; 706 707 list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) { 708 list_del(&req->node); 709 cm->tx_buf[cm->tx_slot] = req->buffer; 710 rc = rio_add_outb_message(cm->mport, req->rdev, cmbox, 711 req->buffer, req->len); 712 kfree(req->buffer); 713 kfree(req); 714 715 ++cm->tx_cnt; 716 ++cm->tx_slot; 717 cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1); 718 if (cm->tx_cnt == RIOCM_TX_RING_SIZE) 719 break; 720 } 721 } 722 723 spin_unlock(&cm->tx_lock); 724 } 725 726 static void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id, 727 int mbox, int slot) 728 { 729 struct cm_dev *cm = dev_id; 730 731 if (cm && rio_mport_is_running(cm->mport)) 732 rio_txcq_handler(cm, slot); 733 } 734 735 static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev, 736 void *buffer, size_t len) 737 { 738 unsigned long flags; 739 struct tx_req *treq; 740 741 treq = kzalloc(sizeof(*treq), GFP_KERNEL); 742 if (treq == NULL) 743 return -ENOMEM; 744 745 treq->rdev = rdev; 746 treq->buffer = buffer; 747 treq->len = len; 748 749 spin_lock_irqsave(&cm->tx_lock, flags); 750 list_add_tail(&treq->node, &cm->tx_reqs); 751 spin_unlock_irqrestore(&cm->tx_lock, flags); 752 return 0; 753 } 754 755 /* 756 * riocm_post_send - helper function that places packet into msg TX queue 757 * @cm: cm_dev object 758 * @rdev: target RapidIO device object (required by outbound msg interface) 759 * @buffer: pointer to a packet buffer to send 760 * @len: length of data to transfer 
761 * @req: request priority flag 762 * 763 * Returns: 0 if success, or error code otherwise. 764 */ 765 static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev, 766 void *buffer, size_t len) 767 { 768 int rc; 769 unsigned long flags; 770 771 spin_lock_irqsave(&cm->tx_lock, flags); 772 773 if (cm->mport == NULL) { 774 rc = -ENODEV; 775 goto err_out; 776 } 777 778 if (cm->tx_cnt == RIOCM_TX_RING_SIZE) { 779 riocm_debug(TX, "Tx Queue is full"); 780 rc = -EBUSY; 781 goto err_out; 782 } 783 784 cm->tx_buf[cm->tx_slot] = buffer; 785 rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len); 786 787 riocm_debug(TX, "Add buf@%p destid=%x tx_slot=%d tx_cnt=%d", 788 buffer, rdev->destid, cm->tx_slot, cm->tx_cnt); 789 790 ++cm->tx_cnt; 791 ++cm->tx_slot; 792 cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1); 793 794 err_out: 795 spin_unlock_irqrestore(&cm->tx_lock, flags); 796 return rc; 797 } 798 799 /* 800 * riocm_ch_send - sends a data packet to a remote device 801 * @ch_id: local channel ID 802 * @buf: pointer to a data buffer to send (including CM header) 803 * @len: length of data to transfer (including CM header) 804 * 805 * ATTN: ASSUMES THAT THE HEADER SPACE IS RESERVED PART OF THE DATA PACKET 806 * 807 * Returns: 0 if success, or 808 * -EINVAL if one or more input parameters is/are not valid, 809 * -ENODEV if cannot find a channel with specified ID, 810 * -EAGAIN if a channel is not in CONNECTED state, 811 * + error codes returned by HW send routine. 812 */ 813 static int riocm_ch_send(u16 ch_id, void *buf, int len) 814 { 815 struct rio_channel *ch; 816 struct rio_ch_chan_hdr *hdr; 817 int ret; 818 819 if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE) 820 return -EINVAL; 821 822 ch = riocm_get_channel(ch_id); 823 if (!ch) { 824 riocm_error("%s(%d) ch_%d not found", current->comm, 825 task_pid_nr(current), ch_id); 826 return -ENODEV; 827 } 828 829 if (!riocm_cmp(ch, RIO_CM_CONNECTED)) { 830 ret = -EAGAIN; 831 goto err_out; 832 } 833 834 /* 835 * Fill buffer header section with corresponding channel data 836 */ 837 hdr = buf; 838 839 hdr->bhdr.src_id = htonl(ch->loc_destid); 840 hdr->bhdr.dst_id = htonl(ch->rem_destid); 841 hdr->bhdr.src_mbox = cmbox; 842 hdr->bhdr.dst_mbox = cmbox; 843 hdr->bhdr.type = RIO_CM_CHAN; 844 hdr->ch_op = CM_DATA_MSG; 845 hdr->dst_ch = htons(ch->rem_channel); 846 hdr->src_ch = htons(ch->id); 847 hdr->msg_len = htons((u16)len); 848 849 /* ATTN: the function call below relies on the fact that underlying 850 * HW-specific add_outb_message() routine copies TX data into its own 851 * internal transfer buffer (true for all RIONET compatible mport 852 * drivers). Must be reviewed if mport driver uses the buffer directly. 
853 */ 854 855 ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len); 856 if (ret) 857 riocm_debug(TX, "ch %d send_err=%d", ch->id, ret); 858 err_out: 859 riocm_put_channel(ch); 860 return ret; 861 } 862 863 static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf) 864 { 865 int i, ret = -EINVAL; 866 867 spin_lock_bh(&ch->lock); 868 869 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { 870 if (ch->rx_ring.inuse[i] == buf) { 871 ch->rx_ring.inuse[i] = NULL; 872 ch->rx_ring.inuse_cnt--; 873 ret = 0; 874 break; 875 } 876 } 877 878 spin_unlock_bh(&ch->lock); 879 880 if (!ret) 881 kfree(buf); 882 883 return ret; 884 } 885 886 /* 887 * riocm_ch_receive - fetch a data packet received for the specified channel 888 * @ch: local channel ID 889 * @buf: pointer to a packet buffer 890 * @timeout: timeout to wait for incoming packet (in jiffies) 891 * 892 * Returns: 0 and valid buffer pointer if success, or NULL pointer and one of: 893 * -EAGAIN if a channel is not in CONNECTED state, 894 * -ENOMEM if in-use tracking queue is full, 895 * -ETIME if wait timeout expired, 896 * -EINTR if wait was interrupted. 897 */ 898 static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout) 899 { 900 void *rxmsg = NULL; 901 int i, ret = 0; 902 long wret; 903 904 if (!riocm_cmp(ch, RIO_CM_CONNECTED)) { 905 ret = -EAGAIN; 906 goto out; 907 } 908 909 if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) { 910 /* If we do not have entries to track buffers given to upper 911 * layer, reject request. 912 */ 913 ret = -ENOMEM; 914 goto out; 915 } 916 917 wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout); 918 919 riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret); 920 921 if (!wret) 922 ret = -ETIME; 923 else if (wret == -ERESTARTSYS) 924 ret = -EINTR; 925 else 926 ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -ECONNRESET; 927 928 if (ret) 929 goto out; 930 931 spin_lock_bh(&ch->lock); 932 933 rxmsg = ch->rx_ring.buf[ch->rx_ring.tail]; 934 ch->rx_ring.buf[ch->rx_ring.tail] = NULL; 935 ch->rx_ring.count--; 936 ch->rx_ring.tail++; 937 ch->rx_ring.tail %= RIOCM_RX_RING_SIZE; 938 ret = -ENOMEM; 939 940 for (i = 0; i < RIOCM_RX_RING_SIZE; i++) { 941 if (ch->rx_ring.inuse[i] == NULL) { 942 ch->rx_ring.inuse[i] = rxmsg; 943 ch->rx_ring.inuse_cnt++; 944 ret = 0; 945 break; 946 } 947 } 948 949 if (ret) { 950 /* We have no entry to store pending message: drop it */ 951 kfree(rxmsg); 952 rxmsg = NULL; 953 } 954 955 spin_unlock_bh(&ch->lock); 956 out: 957 *buf = rxmsg; 958 return ret; 959 } 960 961 /* 962 * riocm_ch_connect - sends a connect request to a remote device 963 * @loc_ch: local channel ID 964 * @cm: CM device to send connect request 965 * @peer: target RapidIO device 966 * @rem_ch: remote channel ID 967 * 968 * Returns: 0 if success, or 969 * -EINVAL if the channel is not in IDLE state, 970 * -EAGAIN if no connection request available immediately, 971 * -ETIME if ACK response timeout expired, 972 * -EINTR if wait for response was interrupted. 
960
961 /*
962  * riocm_ch_connect - sends a connect request to a remote device
963  * @loc_ch: local channel ID
964  * @cm: CM device to send connect request
965  * @peer: target RapidIO device
966  * @rem_ch: remote channel ID
967  *
968  * Returns: 0 if success, or
969  *          -ENODEV if cannot find the specified channel,
970  *          -EINVAL if the channel is not in IDLE state,
971  *          -ENOMEM if packet allocation failed, -ETIME if ACK response timeout expired,
972  *          -EINTR if wait for response was interrupted, or -1 if the channel did not reach CONNECTED state.
973  */
974 static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm,
975                             struct cm_peer *peer, u16 rem_ch)
976 {
977     struct rio_channel *ch = NULL;
978     struct rio_ch_chan_hdr *hdr;
979     int ret;
980     long wret;
981
982     ch = riocm_get_channel(loc_ch);
983     if (!ch)
984         return -ENODEV;
985
986     if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) {
987         ret = -EINVAL;
988         goto conn_done;
989     }
990
991     ch->cmdev = cm;
992     ch->rdev = peer->rdev;
993     ch->context = NULL;
994     ch->loc_destid = cm->mport->host_deviceid;
995     ch->rem_channel = rem_ch;
996
997     /*
998      * Send connect request to the remote RapidIO device
999      */
1000
1001     hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
1002     if (hdr == NULL) {
1003         ret = -ENOMEM;
1004         goto conn_done;
1005     }
1006
1007     hdr->bhdr.src_id = htonl(ch->loc_destid);
1008     hdr->bhdr.dst_id = htonl(peer->rdev->destid);
1009     hdr->bhdr.src_mbox = cmbox;
1010     hdr->bhdr.dst_mbox = cmbox;
1011     hdr->bhdr.type = RIO_CM_CHAN;
1012     hdr->ch_op = CM_CONN_REQ;
1013     hdr->dst_ch = htons(rem_ch);
1014     hdr->src_ch = htons(loc_ch);
1015
1016     /* ATTN: the function call below relies on the fact that underlying
1017      * HW-specific add_outb_message() routine copies TX data into its
1018      * internal transfer buffer. Must be reviewed if mport driver uses
1019      * this buffer directly.
1020      */
1021     ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr));
1022
1023     if (ret != -EBUSY) {
1024         kfree(hdr);
1025     } else {
1026         ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr));
1027         if (ret)
1028             kfree(hdr);
1029     }
1030
1031     if (ret) {
1032         riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE);
1033         goto conn_done;
1034     }
1035
1036     /* Wait for connect response from the remote device */
1037     wret = wait_for_completion_interruptible_timeout(&ch->comp,
1038                                                      RIOCM_CONNECT_TO * HZ);
1039     riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);
1040
1041     if (!wret)
1042         ret = -ETIME;
1043     else if (wret == -ERESTARTSYS)
1044         ret = -EINTR;
1045     else
1046         ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1;
1047
1048 conn_done:
1049     riocm_put_channel(ch);
1050     return ret;
1051 }
1052
1053 static int riocm_send_ack(struct rio_channel *ch)
1054 {
1055     struct rio_ch_chan_hdr *hdr;
1056     int ret;
1057
1058     hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
1059     if (hdr == NULL)
1060         return -ENOMEM;
1061
1062     hdr->bhdr.src_id = htonl(ch->loc_destid);
1063     hdr->bhdr.dst_id = htonl(ch->rem_destid);
1064     hdr->dst_ch = htons(ch->rem_channel);
1065     hdr->src_ch = htons(ch->id);
1066     hdr->bhdr.src_mbox = cmbox;
1067     hdr->bhdr.dst_mbox = cmbox;
1068     hdr->bhdr.type = RIO_CM_CHAN;
1069     hdr->ch_op = CM_CONN_ACK;
1070
1071     /* ATTN: the function call below relies on the fact that underlying
1072      * add_outb_message() routine copies TX data into its internal transfer
1073      * buffer. Review if switching to direct buffer version.
1074      */
1075     ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));
1076
1077     if (ret == -EBUSY && !riocm_queue_req(ch->cmdev,
1078                                           ch->rdev, hdr, sizeof(*hdr)))
1079         return 0;
1080     kfree(hdr);
1081
1082     if (ret)
1083         riocm_error("send ACK to ch_%d on %s failed (ret=%d)",
1084                     ch->id, rio_name(ch->rdev), ret);
1085     return ret;
1086 }
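Every outbound packet built above (CM_DATA_MSG, CM_CONN_REQ, CM_CONN_ACK) converts its multi-byte header fields with htonl()/htons(), so both endpoints agree on byte order regardless of host endianness. A user-space sketch with a reduced, hypothetical header — the real struct rio_ch_chan_hdr has more fields:

#include <arpa/inet.h>  /* htonl, htons */
#include <stdint.h>
#include <stdio.h>

/* Reduced stand-in for struct rio_ch_chan_hdr; field set is illustrative only. */
struct mini_hdr {
    uint32_t src_id;
    uint32_t dst_id;
    uint16_t dst_ch;
    uint16_t src_ch;
    uint16_t msg_len;
};

static void fill_hdr(struct mini_hdr *h, uint32_t src, uint32_t dst,
                     uint16_t sch, uint16_t dch, uint16_t len)
{
    h->src_id = htonl(src);   /* 32-bit fields: host -> network byte order */
    h->dst_id = htonl(dst);
    h->dst_ch = htons(dch);   /* 16-bit fields: host -> network byte order */
    h->src_ch = htons(sch);
    h->msg_len = htons(len);
}

int main(void)
{
    struct mini_hdr h;
    fill_hdr(&h, 0x10, 0x20, 7, 9, 64);
    /* On a little-endian host the stored bytes differ from the host value. */
    printf("dst_ch as stored: 0x%04x\n", h.dst_ch);
    return 0;
}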
1094 * 1095 * Returns: pointer to new channel struct if success, or error-valued pointer: 1096 * -ENODEV - cannot find specified channel or mport, 1097 * -EINVAL - the channel is not in IDLE state, 1098 * -EAGAIN - no connection request available immediately (timeout=0), 1099 * -ENOMEM - unable to allocate new channel, 1100 * -ETIME - wait timeout expired, 1101 * -EINTR - wait was interrupted. 1102 */ 1103 static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, 1104 long timeout) 1105 { 1106 struct rio_channel *ch = NULL; 1107 struct rio_channel *new_ch = NULL; 1108 struct conn_req *req; 1109 struct cm_peer *peer; 1110 int found = 0; 1111 int err = 0; 1112 long wret; 1113 1114 ch = riocm_get_channel(ch_id); 1115 if (!ch) 1116 return ERR_PTR(-EINVAL); 1117 1118 if (!riocm_cmp(ch, RIO_CM_LISTEN)) { 1119 err = -EINVAL; 1120 goto err_put; 1121 } 1122 1123 /* Don't sleep if this is a non blocking call */ 1124 if (!timeout) { 1125 if (!try_wait_for_completion(&ch->comp)) { 1126 err = -EAGAIN; 1127 goto err_put; 1128 } 1129 } else { 1130 riocm_debug(WAIT, "on %d", ch->id); 1131 1132 wret = wait_for_completion_interruptible_timeout(&ch->comp, 1133 timeout); 1134 if (!wret) { 1135 err = -ETIME; 1136 goto err_put; 1137 } else if (wret == -ERESTARTSYS) { 1138 err = -EINTR; 1139 goto err_put; 1140 } 1141 } 1142 1143 spin_lock_bh(&ch->lock); 1144 1145 if (ch->state != RIO_CM_LISTEN) { 1146 err = -ECANCELED; 1147 } else if (list_empty(&ch->accept_queue)) { 1148 riocm_debug(WAIT, "on %d accept_queue is empty on completion", 1149 ch->id); 1150 err = -EIO; 1151 } 1152 1153 spin_unlock_bh(&ch->lock); 1154 1155 if (err) { 1156 riocm_debug(WAIT, "on %d returns %d", ch->id, err); 1157 goto err_put; 1158 } 1159 1160 /* Create new channel for this connection */ 1161 new_ch = riocm_ch_alloc(RIOCM_CHNUM_AUTO); 1162 1163 if (IS_ERR(new_ch)) { 1164 riocm_error("failed to get channel for new req (%ld)", 1165 PTR_ERR(new_ch)); 1166 err = -ENOMEM; 1167 goto err_put; 1168 } 1169 1170 spin_lock_bh(&ch->lock); 1171 1172 req = list_first_entry(&ch->accept_queue, struct conn_req, node); 1173 list_del(&req->node); 1174 new_ch->cmdev = ch->cmdev; 1175 new_ch->loc_destid = ch->loc_destid; 1176 new_ch->rem_destid = req->destid; 1177 new_ch->rem_channel = req->chan; 1178 1179 spin_unlock_bh(&ch->lock); 1180 riocm_put_channel(ch); 1181 kfree(req); 1182 1183 down_read(&rdev_sem); 1184 /* Find requester's device object */ 1185 list_for_each_entry(peer, &new_ch->cmdev->peers, node) { 1186 if (peer->rdev->destid == new_ch->rem_destid) { 1187 riocm_debug(RX_CMD, "found matching device(%s)", 1188 rio_name(peer->rdev)); 1189 found = 1; 1190 break; 1191 } 1192 } 1193 up_read(&rdev_sem); 1194 1195 if (!found) { 1196 /* If peer device object not found, simply ignore the request */ 1197 err = -ENODEV; 1198 goto err_nodev; 1199 } 1200 1201 new_ch->rdev = peer->rdev; 1202 new_ch->state = RIO_CM_CONNECTED; 1203 spin_lock_init(&new_ch->lock); 1204 1205 /* Acknowledge the connection request. 
1222
1223 /*
1224  * riocm_ch_listen - puts a channel into LISTEN state
1225  * @ch_id: channel ID
1226  *
1227  * Returns: 0 if success, or
1228  *          -EINVAL if the specified channel does not exist or
1229  *          is not in CHAN_BOUND state.
1230  */
1231 static int riocm_ch_listen(u16 ch_id)
1232 {
1233     struct rio_channel *ch = NULL;
1234     int ret = 0;
1235
1236     riocm_debug(CHOP, "(ch_%d)", ch_id);
1237
1238     ch = riocm_get_channel(ch_id);
1239     if (!ch || !riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN))
1240         ret = -EINVAL;
1241     if (ch) riocm_put_channel(ch); /* ch may be NULL if the lookup failed */
1242     return ret;
1243 }
1244
1245 /*
1246  * riocm_ch_bind - associate a channel object and an mport device
1247  * @ch_id: channel ID
1248  * @mport_id: local mport device ID
1249  * @context: pointer to the additional caller's context
1250  *
1251  * Returns: 0 if success, or
1252  *          -ENODEV if cannot find specified mport,
1253  *          -EINVAL if the specified channel does not exist or
1254  *          is not in IDLE state.
1255  */
1256 static int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context)
1257 {
1258     struct rio_channel *ch = NULL;
1259     struct cm_dev *cm;
1260     int rc = -ENODEV;
1261
1262     riocm_debug(CHOP, "ch_%d to mport_%d", ch_id, mport_id);
1263
1264     /* Find matching cm_dev object */
1265     down_read(&rdev_sem);
1266     list_for_each_entry(cm, &cm_dev_list, list) {
1267         if ((cm->mport->id == mport_id) &&
1268              rio_mport_is_running(cm->mport)) {
1269             rc = 0;
1270             break;
1271         }
1272     }
1273
1274     if (rc)
1275         goto exit;
1276
1277     ch = riocm_get_channel(ch_id);
1278     if (!ch) {
1279         rc = -EINVAL;
1280         goto exit;
1281     }
1282
1283     spin_lock_bh(&ch->lock);
1284     if (ch->state != RIO_CM_IDLE) {
1285         spin_unlock_bh(&ch->lock);
1286         rc = -EINVAL;
1287         goto err_put;
1288     }
1289
1290     ch->cmdev = cm;
1291     ch->loc_destid = cm->mport->host_deviceid;
1292     ch->context = context;
1293     ch->state = RIO_CM_CHAN_BOUND;
1294     spin_unlock_bh(&ch->lock);
1295 err_put:
1296     riocm_put_channel(ch);
1297 exit:
1298     up_read(&rdev_sem);
1299     return rc;
1300 }
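riocm_get_channel()/riocm_put_channel() are a kref-style get/put pair: the last put frees the object, and put must never see a NULL pointer — which is why the listen path above guards its put after a lookup that can fail. A small C11-atomics sketch of the pattern; `chan_get`/`chan_put` are hypothetical user-space analogues, not the kernel kref API:

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct chan {
    atomic_int ref;
};

static struct chan *chan_get(struct chan *c)
{
    if (c)
        atomic_fetch_add(&c->ref, 1);
    return c;
}

/* Like kref_put(): frees on the last reference. The caller must not pass
 * NULL, so lookups that may fail need an explicit guard, as in
 * riocm_ch_listen() above. */
static void chan_put(struct chan *c)
{
    if (atomic_fetch_sub(&c->ref, 1) == 1)
        free(c);
}

int main(void)
{
    struct chan *c = calloc(1, sizeof(*c));
    atomic_init(&c->ref, 1);
    chan_get(c);
    chan_put(c);
    chan_put(c); /* last put frees the object */
    struct chan *missing = NULL;
    if (missing) /* the guard: chan_put(NULL) would crash */
        chan_put(missing);
    puts("done");
    return 0;
}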
1301
1302 /*
1303  * riocm_ch_alloc - channel object allocation helper routine
1304  * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
1305  *
1306  * Return value: pointer to newly created channel object,
1307  *               or error-valued pointer
1308  */
1309 static struct rio_channel *riocm_ch_alloc(u16 ch_num)
1310 {
1311     int id;
1312     int start, end;
1313     struct rio_channel *ch;
1314
1315     ch = kzalloc(sizeof(*ch), GFP_KERNEL);
1316     if (!ch)
1317         return ERR_PTR(-ENOMEM);
1318
1319     if (ch_num) {
1320         /* If requested, try to obtain the specified channel ID */
1321         start = ch_num;
1322         end = ch_num + 1;
1323     } else {
1324         /* Obtain channel ID from the dynamic allocation range */
1325         start = chstart;
1326         end = RIOCM_MAX_CHNUM + 1;
1327     }
1328
1329     idr_preload(GFP_KERNEL);
1330     spin_lock_bh(&idr_lock);
1331     id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT);
1332     spin_unlock_bh(&idr_lock);
1333     idr_preload_end();
1334
1335     if (id < 0) {
1336         kfree(ch);
1337         return ERR_PTR(id == -ENOSPC ? -EBUSY : id);
1338     }
1339
1340     ch->id = (u16)id;
1341     ch->state = RIO_CM_IDLE;
1342     spin_lock_init(&ch->lock);
1343     INIT_LIST_HEAD(&ch->accept_queue);
1344     INIT_LIST_HEAD(&ch->ch_node);
1345     init_completion(&ch->comp);
1346     init_completion(&ch->comp_close);
1347     kref_init(&ch->ref);
1348     ch->rx_ring.head = 0;
1349     ch->rx_ring.tail = 0;
1350     ch->rx_ring.count = 0;
1351     ch->rx_ring.inuse_cnt = 0;
1352
1353     return ch;
1354 }
1355
1356 /*
1357  * riocm_ch_create - creates a new channel object and allocates ID for it
1358  * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
1359  *
1360  * Allocates and initializes a new channel object. If the parameter ch_num > 0
1361  * and is within the valid range, riocm_ch_create tries to allocate the
1362  * specified ID for the new channel. If ch_num = 0, channel ID will be assigned
1363  * automatically from the range (chstart ... RIOCM_MAX_CHNUM).
1364  * Module parameter 'chstart' defines start of an ID range available for dynamic
1365  * allocation. Range below 'chstart' is reserved for pre-defined ID numbers.
1366  * Available channel numbers are limited by 16-bit size of channel numbers used
1367  * in the packet header.
1368  *
1369  * Return value: pointer to rio_channel structure if successful (with channel
1370  *               number updated via pointer) or error-valued pointer on error.
1371  */
1372 static struct rio_channel *riocm_ch_create(u16 *ch_num)
1373 {
1374     struct rio_channel *ch = NULL;
1375
1376     ch = riocm_ch_alloc(*ch_num);
1377
1378     if (IS_ERR(ch))
1379         riocm_debug(CHOP, "Failed to allocate channel %d (err=%ld)",
1380                     *ch_num, PTR_ERR(ch));
1381     else
1382         *ch_num = ch->id;
1383
1384     return ch;
1385 }
1386
1387 /*
1388  * riocm_ch_free - channel object release routine
1389  * @ref: pointer to a channel's kref structure
1390  */
1391 static void riocm_ch_free(struct kref *ref)
1392 {
1393     struct rio_channel *ch = container_of(ref, struct rio_channel, ref);
1394     int i;
1395
1396     riocm_debug(CHOP, "(ch_%d)", ch->id);
1397
1398     if (ch->rx_ring.inuse_cnt) {
1399         for (i = 0;
1400              i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) {
1401             if (ch->rx_ring.inuse[i] != NULL) {
1402                 kfree(ch->rx_ring.inuse[i]);
1403                 ch->rx_ring.inuse_cnt--;
1404             }
1405         }
1406     }
1407
1408     if (ch->rx_ring.count)
1409         for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) {
1410             if (ch->rx_ring.buf[i] != NULL) {
1411                 kfree(ch->rx_ring.buf[i]);
1412                 ch->rx_ring.count--;
1413             }
1414         }
1415
1416     complete(&ch->comp_close);
1417 }
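riocm_ch_alloc() above uses idr_alloc_cyclic(), so consecutive allocations hand out increasing IDs within [start, end), wrapping around rather than immediately reusing a just-freed ID — which reduces the chance of a stale channel number being confused with a new one. A toy user-space allocator showing the cyclic search; the table-based scheme and all names are hypothetical:

#include <stdio.h>

#define START 256
#define END   512  /* allocate in [START, END), like [chstart, RIOCM_MAX_CHNUM] */

static unsigned char used[END];
static int next = START;       /* cursor that gives the cyclic behaviour */

/* Returns an ID in [START, END) or -1 when the space is exhausted
 * (the driver maps idr's -ENOSPC to -EBUSY). */
static int alloc_cyclic(void)
{
    for (int scanned = 0; scanned < END - START; scanned++) {
        int id = next;
        next = (next + 1 - START) % (END - START) + START;
        if (!used[id]) {
            used[id] = 1;
            return id;
        }
    }
    return -1;
}

int main(void)
{
    int a = alloc_cyclic(), b = alloc_cyclic();
    used[a] = 0;                                /* free the first ID... */
    printf("%d %d %d\n", a, b, alloc_cyclic()); /* ...yet 258 is handed out next */
    return 0;
}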
1418
1419 static int riocm_send_close(struct rio_channel *ch)
1420 {
1421     struct rio_ch_chan_hdr *hdr;
1422     int ret;
1423
1424     /*
1425      * Send CH_CLOSE notification to the remote RapidIO device
1426      */
1427
1428     hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
1429     if (hdr == NULL)
1430         return -ENOMEM;
1431
1432     hdr->bhdr.src_id = htonl(ch->loc_destid);
1433     hdr->bhdr.dst_id = htonl(ch->rem_destid);
1434     hdr->bhdr.src_mbox = cmbox;
1435     hdr->bhdr.dst_mbox = cmbox;
1436     hdr->bhdr.type = RIO_CM_CHAN;
1437     hdr->ch_op = CM_CONN_CLOSE;
1438     hdr->dst_ch = htons(ch->rem_channel);
1439     hdr->src_ch = htons(ch->id);
1440
1441     /* ATTN: the function call below relies on the fact that underlying
1442      * add_outb_message() routine copies TX data into its internal transfer
1443      * buffer. Needs to be reviewed if switched to direct buffer mode.
1444      */
1445     ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));
1446
1447     if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev,
1448                                           hdr, sizeof(*hdr)))
1449         return 0;
1450     kfree(hdr);
1451
1452     if (ret)
1453         riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret);
1454
1455     return ret;
1456 }
1457
1458 /*
1459  * riocm_ch_close - closes a channel object with specified ID (by local request)
1460  * @ch: channel to be closed
1461  */
1462 static int riocm_ch_close(struct rio_channel *ch)
1463 {
1464     unsigned long tmo = msecs_to_jiffies(3000);
1465     enum rio_cm_state state;
1466     long wret;
1467     int ret = 0;
1468
1469     riocm_debug(CHOP, "ch_%d by %s(%d)",
1470                 ch->id, current->comm, task_pid_nr(current));
1471
1472     state = riocm_exch(ch, RIO_CM_DESTROYING);
1473     if (state == RIO_CM_CONNECTED)
1474         riocm_send_close(ch);
1475
1476     complete_all(&ch->comp);
1477
1478     riocm_put_channel(ch);
1479     wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo);
1480
1481     riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);
1482
1483     if (wret == 0) {
1484         /* Timeout on wait occurred */
1485         riocm_debug(CHOP, "%s(%d) timed out waiting for ch %d",
1486                     current->comm, task_pid_nr(current), ch->id);
1487         ret = -ETIMEDOUT;
1488     } else if (wret == -ERESTARTSYS) {
1489         /* Wait_for_completion was interrupted by a signal */
1490         riocm_debug(CHOP, "%s(%d) wait for ch %d was interrupted",
1491                     current->comm, task_pid_nr(current), ch->id);
1492         ret = -EINTR;
1493     }
1494
1495     if (!ret) {
1496         riocm_debug(CHOP, "ch_%d resources released", ch->id);
1497         kfree(ch);
1498     } else {
1499         riocm_debug(CHOP, "failed to release ch_%d resources", ch->id);
1500     }
1501
1502     return ret;
1503 }
1504
1505 /*
1506  * riocm_cdev_open() - Open character device
1507  */
1508 static int riocm_cdev_open(struct inode *inode, struct file *filp)
1509 {
1510     riocm_debug(INIT, "by %s(%d) filp=%p ",
1511                 current->comm, task_pid_nr(current), filp);
1512
1513     if (list_empty(&cm_dev_list))
1514         return -ENODEV;
1515
1516     return 0;
1517 }
1518
1519 /*
1520  * riocm_cdev_release() - Release character device
1521  */
1522 static int riocm_cdev_release(struct inode *inode, struct file *filp)
1523 {
1524     struct rio_channel *ch, *_c;
1525     unsigned int i;
1526     LIST_HEAD(list);
1527
1528     riocm_debug(EXIT, "by %s(%d) filp=%p",
1529                 current->comm, task_pid_nr(current), filp);
1530
1531     /* Check if there are channels associated with this file descriptor */
1532     spin_lock_bh(&idr_lock);
1533     idr_for_each_entry(&ch_idr, ch, i) {
1534         if (ch && ch->filp == filp) {
1535             riocm_debug(EXIT, "ch_%d not released by %s(%d)",
1536                         ch->id, current->comm,
1537                         task_pid_nr(current));
1538             idr_remove(&ch_idr, ch->id);
1539             list_add(&ch->ch_node, &list);
1540         }
1541     }
1542     spin_unlock_bh(&idr_lock);
1543
1544     if (!list_empty(&list)) {
1545         list_for_each_entry_safe(ch, _c, &list, ch_node) {
1546             list_del(&ch->ch_node);
1547             riocm_ch_close(ch);
1548         }
1549     }
1550
1551     return 0;
1552 }
1553
1554 /*
1555  * cm_ep_get_list_size() - Reports number of endpoints in the network
1556  */
1557 static int cm_ep_get_list_size(void __user *arg)
1558 {
1559     u32 __user *p = arg;
1560     u32 mport_id;
1561     u32 count = 0;
1562     struct cm_dev *cm;
1563
1564     if (get_user(mport_id, p))
1565         return -EFAULT;
1566     if (mport_id >= RIO_MAX_MPORTS)
1567         return -EINVAL;
1568
1569     /* Find a matching cm_dev object */
1570     down_read(&rdev_sem);
1571     list_for_each_entry(cm, &cm_dev_list, list) {
1572         if (cm->mport->id == mport_id) {
1573             count = cm->npeers;
1574             up_read(&rdev_sem);
1575             if (copy_to_user(arg, &count, sizeof(u32)))
1576                 return -EFAULT;
1577             return 0;
1578         }
1579     }
1580     up_read(&rdev_sem);
1581
1582     return -ENODEV;
1583 }
1584
1585 /*
1586  * cm_ep_get_list() - Returns list of attached endpoints
1587  */
1588 static int cm_ep_get_list(void __user *arg)
1589 {
1590     struct cm_dev *cm;
1591     struct cm_peer *peer;
1592     u32 info[2];
1593     void *buf;
1594     u32 nent;
1595     u32 *entry_ptr;
1596     u32 i = 0;
1597     int ret = 0;
1598
1599     if (copy_from_user(&info, arg, sizeof(info)))
1600         return -EFAULT;
1601
1602     if (info[1] >= RIO_MAX_MPORTS || info[0] > RIOCM_MAX_EP_COUNT)
1603         return -EINVAL;
1604
1605     /* Find a matching cm_dev object */
1606     down_read(&rdev_sem);
1607     list_for_each_entry(cm, &cm_dev_list, list)
1608         if (cm->mport->id == (u8)info[1])
1609             goto found;
1610
1611     up_read(&rdev_sem);
1612     return -ENODEV;
1613
1614 found:
1615     nent = min(info[0], cm->npeers);
1616     buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL);
1617     if (!buf) {
1618         up_read(&rdev_sem);
1619         return -ENOMEM;
1620     }
1621
1622     entry_ptr = (u32 *)((uintptr_t)buf + 2*sizeof(u32));
1623
1624     list_for_each_entry(peer, &cm->peers, node) {
1625         *entry_ptr = (u32)peer->rdev->destid;
1626         entry_ptr++;
1627         if (++i == nent)
1628             break;
1629     }
1630     up_read(&rdev_sem);
1631
1632     ((u32 *)buf)[0] = i; /* report an updated number of entries */
1633     ((u32 *)buf)[1] = info[1]; /* put back an mport ID */
1634     if (copy_to_user(arg, buf, sizeof(u32) * (nent + 2))) /* buf holds only nent + 2 words */
1635         ret = -EFAULT;
1636
1637     kfree(buf);
1638     return ret;
1639 }
1640
1641 /*
1642  * cm_mport_get_list() - Returns list of available local mport devices
1643  */
1644 static int cm_mport_get_list(void __user *arg)
1645 {
1646     int ret = 0;
1647     u32 entries;
1648     void *buf;
1649     struct cm_dev *cm;
1650     u32 *entry_ptr;
1651     int count = 0;
1652
1653     if (copy_from_user(&entries, arg, sizeof(entries)))
1654         return -EFAULT;
1655     if (entries == 0 || entries > RIO_MAX_MPORTS)
1656         return -EINVAL;
1657     buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL);
1658     if (!buf)
1659         return -ENOMEM;
1660
1661     /* Scan all registered cm_dev objects */
1662     entry_ptr = (u32 *)((uintptr_t)buf + sizeof(u32));
1663     down_read(&rdev_sem);
1664     list_for_each_entry(cm, &cm_dev_list, list) {
1665         if (count++ < entries) {
1666             *entry_ptr = (cm->mport->id << 16) |
1667                           cm->mport->host_deviceid;
1668             entry_ptr++;
1669         }
1670     }
1671     up_read(&rdev_sem);
1672
1673     *((u32 *)buf) = count; /* report a real number of entries */
1674     if (copy_to_user(arg, buf, sizeof(u32) * (min_t(u32, count, entries) + 1))) /* buf holds entries + 1 words */
1675         ret = -EFAULT;
1676
1677     kfree(buf);
1678     return ret;
1679 }
1680
1681 /*
1682  * cm_chan_create() - Create a message exchange channel
1683  */
1684 static int cm_chan_create(struct file *filp, void __user *arg)
1685 {
1686     u16 __user *p = arg;
1687     u16 ch_num;
1688     struct rio_channel *ch;
1689
1690     if (get_user(ch_num, p))
1691         return -EFAULT;
1692
1693     riocm_debug(CHOP, "ch_%d requested by %s(%d)",
1694                 ch_num, current->comm, task_pid_nr(current));
1695     ch = riocm_ch_create(&ch_num);
1696     if (IS_ERR(ch))
1697         return PTR_ERR(ch);
1698
1699     ch->filp = filp;
1700     riocm_debug(CHOP, "ch_%d created by %s(%d)",
1701                 ch_num, current->comm, task_pid_nr(current));
1702     return put_user(ch_num, p);
1703 }
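Both list ioctls above exchange a flat u32 array with user space: word 0 carries the entry count on return, word 1 (for the endpoint list) echoes the mport ID, and the entries follow. A user-space sketch of building such a reply buffer; `build_ep_reply` and the sample data are hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Builds [count][mport_id][destid 0..n-1], the layout cm_ep_get_list()
 * copies back to user space. Returns a malloc'd buffer of (count + 2)
 * u32 words, or NULL on allocation failure. */
static uint32_t *build_ep_reply(uint32_t mport_id, const uint32_t *destids,
                                uint32_t count)
{
    uint32_t *buf = calloc(count + 2, sizeof(uint32_t));
    if (!buf)
        return NULL;
    buf[0] = count;
    buf[1] = mport_id;
    memcpy(&buf[2], destids, count * sizeof(uint32_t));
    return buf;
}

int main(void)
{
    uint32_t peers[] = { 0x01, 0x05, 0x09 };
    uint32_t *reply = build_ep_reply(0, peers, 3);
    printf("count=%u mport=%u first=%u\n", reply[0], reply[1], reply[2]);
    free(reply);
    return 0;
}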
1704
1705 /*
1706  * cm_chan_close() - Close channel
1707  * @filp: Pointer to file object
1708  * @arg: Channel to close
1709  */
1710 static int cm_chan_close(struct file *filp, void __user *arg)
1711 {
1712     u16 __user *p = arg;
1713     u16 ch_num;
1714     struct rio_channel *ch;
1715
1716     if (get_user(ch_num, p))
1717         return -EFAULT;
1718
1719     riocm_debug(CHOP, "ch_%d by %s(%d)",
1720                 ch_num, current->comm, task_pid_nr(current));
1721
1722     spin_lock_bh(&idr_lock);
1723     ch = idr_find(&ch_idr, ch_num);
1724     if (!ch) {
1725         spin_unlock_bh(&idr_lock);
1726         return 0;
1727     }
1728     if (ch->filp != filp) {
1729         spin_unlock_bh(&idr_lock);
1730         return -EINVAL;
1731     }
1732     idr_remove(&ch_idr, ch->id);
1733     spin_unlock_bh(&idr_lock);
1734
1735     return riocm_ch_close(ch);
1736 }
1737
1738 /*
1739  * cm_chan_bind() - Bind channel
1740  * @arg: Channel number
1741  */
1742 static int cm_chan_bind(void __user *arg)
1743 {
1744     struct rio_cm_channel chan;
1745
1746     if (copy_from_user(&chan, arg, sizeof(chan)))
1747         return -EFAULT;
1748     if (chan.mport_id >= RIO_MAX_MPORTS)
1749         return -EINVAL;
1750
1751     return riocm_ch_bind(chan.id, chan.mport_id, NULL);
1752 }
1753
1754 /*
1755  * cm_chan_listen() - Listen on channel
1756  * @arg: Channel number
1757  */
1758 static int cm_chan_listen(void __user *arg)
1759 {
1760     u16 __user *p = arg;
1761     u16 ch_num;
1762
1763     if (get_user(ch_num, p))
1764         return -EFAULT;
1765
1766     return riocm_ch_listen(ch_num);
1767 }
1768
1769 /*
1770  * cm_chan_accept() - Accept incoming connection
1771  * @filp: Pointer to file object
1772  * @arg: Channel number
1773  */
1774 static int cm_chan_accept(struct file *filp, void __user *arg)
1775 {
1776     struct rio_cm_accept param;
1777     long accept_to;
1778     struct rio_channel *ch;
1779
1780     if (copy_from_user(&param, arg, sizeof(param)))
1781         return -EFAULT;
1782
1783     riocm_debug(CHOP, "on ch_%d by %s(%d)",
1784                 param.ch_num, current->comm, task_pid_nr(current));
1785
1786     accept_to = param.wait_to ?
1787                     msecs_to_jiffies(param.wait_to) : 0;
1788
1789     ch = riocm_ch_accept(param.ch_num, &param.ch_num, accept_to);
1790     if (IS_ERR(ch))
1791         return PTR_ERR(ch);
1792     ch->filp = filp;
1793
1794     riocm_debug(CHOP, "new ch_%d for %s(%d)",
1795                 ch->id, current->comm, task_pid_nr(current));
1796
1797     if (copy_to_user(arg, &param, sizeof(param)))
1798         return -EFAULT;
1799     return 0;
1800 }
1801
1802 /*
1803  * cm_chan_connect() - Connect on channel
1804  * @arg: Channel information
1805  */
1806 static int cm_chan_connect(void __user *arg)
1807 {
1808     struct rio_cm_channel chan;
1809     struct cm_dev *cm;
1810     struct cm_peer *peer;
1811     int ret = -ENODEV;
1812
1813     if (copy_from_user(&chan, arg, sizeof(chan)))
1814         return -EFAULT;
1815     if (chan.mport_id >= RIO_MAX_MPORTS)
1816         return -EINVAL;
1817
1818     down_read(&rdev_sem);
1819
1820     /* Find matching cm_dev object */
1821     list_for_each_entry(cm, &cm_dev_list, list) {
1822         if (cm->mport->id == chan.mport_id) {
1823             ret = 0;
1824             break;
1825         }
1826     }
1827
1828     if (ret)
1829         goto err_out;
1830
1831     if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) {
1832         ret = -EINVAL;
1833         goto err_out;
1834     }
1835
1836     /* Find corresponding RapidIO endpoint device object */
1837     ret = -ENODEV;
1838
1839     list_for_each_entry(peer, &cm->peers, node) {
1840         if (peer->rdev->destid == chan.remote_destid) {
1841             ret = 0;
1842             break;
1843         }
1844     }
1845
1846     if (ret)
1847         goto err_out;
1848
1849     up_read(&rdev_sem);
1850
1851     return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel);
1852 err_out:
1853     up_read(&rdev_sem);
1854     return ret;
1855 }
1856
1857 /*
1858  * cm_chan_msg_send() - Send a message through channel
1859  * @arg: Outbound message information
1860  */
1861 static int cm_chan_msg_send(void __user *arg)
1862 {
1863     struct rio_cm_msg msg;
1864     void *buf;
1865     int ret = 0;
1866
1867     if (copy_from_user(&msg, arg, sizeof(msg)))
1868         return -EFAULT;
1869     if (msg.size < sizeof(struct rio_ch_chan_hdr) || msg.size > RIO_MAX_MSG_SIZE)
1870         return -EINVAL; /* riocm_ch_send() writes a CM header into the buffer head */
1871
1872     buf = kmalloc(msg.size, GFP_KERNEL);
1873     if (!buf)
1874         return -ENOMEM;
1875
1876     if (copy_from_user(buf, (void __user *)(uintptr_t)msg.msg, msg.size)) {
1877         ret = -EFAULT;
1878         goto out;
1879     }
1880
1881     ret = riocm_ch_send(msg.ch_num, buf, msg.size);
1882 out:
1883     kfree(buf);
1884     return ret;
1885 }
1886
1887 /*
1888  * cm_chan_msg_rcv() - Receive a message through channel
1889  * @arg: Inbound message information
1890  */
1891 static int cm_chan_msg_rcv(void __user *arg)
1892 {
1893     struct rio_cm_msg msg;
1894     struct rio_channel *ch;
1895     void *buf;
1896     long rxto;
1897     int ret = 0, msg_size;
1898
1899     if (copy_from_user(&msg, arg, sizeof(msg)))
1900         return -EFAULT;
1901
1902     if (msg.ch_num == 0 || msg.size == 0)
1903         return -EINVAL;
1904
1905     ch = riocm_get_channel(msg.ch_num);
1906     if (!ch)
1907         return -ENODEV;
1908
1909     rxto = msg.rxto ? msecs_to_jiffies(msg.rxto) : MAX_SCHEDULE_TIMEOUT;
1910
1911     ret = riocm_ch_receive(ch, &buf, rxto);
1912     if (ret)
1913         goto out;
1914
1915     msg_size = min(msg.size, (u16)(RIO_MAX_MSG_SIZE));
1916
1917     if (copy_to_user((void __user *)(uintptr_t)msg.msg, buf, msg_size))
1918         ret = -EFAULT;
1919
1920     riocm_ch_free_rxbuf(ch, buf);
1921 out:
1922     riocm_put_channel(ch);
1923     return ret;
1924 }
1925
1926 /*
1927  * riocm_cdev_ioctl() - IOCTL requests handler
1928  */
1929 static long
1930 riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1931 {
1932     switch (cmd) {
1933     case RIO_CM_EP_GET_LIST_SIZE:
1934         return cm_ep_get_list_size((void __user *)arg);
1935     case RIO_CM_EP_GET_LIST:
1936         return cm_ep_get_list((void __user *)arg);
1937     case RIO_CM_CHAN_CREATE:
1938         return cm_chan_create(filp, (void __user *)arg);
1939     case RIO_CM_CHAN_CLOSE:
1940         return cm_chan_close(filp, (void __user *)arg);
1941     case RIO_CM_CHAN_BIND:
1942         return cm_chan_bind((void __user *)arg);
1943     case RIO_CM_CHAN_LISTEN:
1944         return cm_chan_listen((void __user *)arg);
1945     case RIO_CM_CHAN_ACCEPT:
1946         return cm_chan_accept(filp, (void __user *)arg);
1947     case RIO_CM_CHAN_CONNECT:
1948         return cm_chan_connect((void __user *)arg);
1949     case RIO_CM_CHAN_SEND:
1950         return cm_chan_msg_send((void __user *)arg);
1951     case RIO_CM_CHAN_RECEIVE:
1952         return cm_chan_msg_rcv((void __user *)arg);
1953     case RIO_CM_MPORT_GET_LIST:
1954         return cm_mport_get_list((void __user *)arg);
1955     default:
1956         break;
1957     }
1958
1959     return -EINVAL;
1960 }
1961
1962 static const struct file_operations riocm_cdev_fops = {
1963     .owner = THIS_MODULE,
1964     .open = riocm_cdev_open,
1965     .release = riocm_cdev_release,
1966     .unlocked_ioctl = riocm_cdev_ioctl,
1967 };
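cm_chan_msg_send() hands the user buffer straight to riocm_ch_send(), which overwrites its first sizeof(struct rio_ch_chan_hdr) bytes — this is the contract behind the ATTN note ("header space is reserved part of the data packet") and the minimum-size check above. A user-space sketch of building such a buffer; HDR_SIZE and `make_msg` are hypothetical stand-ins:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HDR_SIZE 20   /* stand-in for sizeof(struct rio_ch_chan_hdr) */

/* Builds a send buffer the way a rio_cm client must: header space first,
 * payload after it. The driver fills bytes [0, HDR_SIZE) itself. */
static void *make_msg(const char *payload, size_t plen, size_t *total)
{
    char *buf;

    *total = HDR_SIZE + plen;
    buf = malloc(*total);
    if (!buf)
        return NULL;
    memset(buf, 0, HDR_SIZE);              /* reserved for the CM header */
    memcpy(buf + HDR_SIZE, payload, plen); /* actual user data */
    return buf;
}

int main(void)
{
    size_t total;
    void *msg = make_msg("hello", 5, &total);
    printf("total=%zu, payload at offset %d\n", total, HDR_SIZE);
    free(msg);
    return 0;
}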
1968
1969 /*
1970  * riocm_add_dev - add new remote RapidIO device into channel management core
1971  * @dev: device object associated with RapidIO device
1972  * @sif: subsystem interface
1973  *
1974  * Adds the specified RapidIO device (if applicable) into peers list of
1975  * the corresponding channel management device (cm_dev).
1976  */
1977 static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)
1978 {
1979     struct cm_peer *peer;
1980     struct rio_dev *rdev = to_rio_dev(dev);
1981     struct cm_dev *cm;
1982
1983     /* Check if the remote device has capabilities required to support CM */
1984     if (!dev_cm_capable(rdev))
1985         return 0;
1986
1987     riocm_debug(RDEV, "(%s)", rio_name(rdev));
1988
1989     peer = kmalloc(sizeof(*peer), GFP_KERNEL);
1990     if (!peer)
1991         return -ENOMEM;
1992
1993     /* Find a corresponding cm_dev object */
1994     down_write(&rdev_sem);
1995     list_for_each_entry(cm, &cm_dev_list, list) {
1996         if (cm->mport == rdev->net->hport)
1997             goto found;
1998     }
1999
2000     up_write(&rdev_sem);
2001     kfree(peer);
2002     return -ENODEV;
2003
2004 found:
2005     peer->rdev = rdev;
2006     list_add_tail(&peer->node, &cm->peers);
2007     cm->npeers++;
2008
2009     up_write(&rdev_sem);
2010     return 0;
2011 }
2012
2013 /*
2014  * riocm_remove_dev - remove remote RapidIO device from channel management core
2015  * @dev: device object associated with RapidIO device
2016  * @sif: subsystem interface
2017  *
2018  * Removes the specified RapidIO device (if applicable) from peers list of
2019  * the corresponding channel management device (cm_dev).
2020  */
2021 static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)
2022 {
2023     struct rio_dev *rdev = to_rio_dev(dev);
2024     struct cm_dev *cm;
2025     struct cm_peer *peer;
2026     struct rio_channel *ch, *_c;
2027     unsigned int i;
2028     bool found = false;
2029     LIST_HEAD(list);
2030
2031     /* Check if the remote device has capabilities required to support CM */
2032     if (!dev_cm_capable(rdev))
2033         return;
2034
2035     riocm_debug(RDEV, "(%s)", rio_name(rdev));
2036
2037     /* Find matching cm_dev object */
2038     down_write(&rdev_sem);
2039     list_for_each_entry(cm, &cm_dev_list, list) {
2040         if (cm->mport == rdev->net->hport) {
2041             found = true;
2042             break;
2043         }
2044     }
2045
2046     if (!found) {
2047         up_write(&rdev_sem);
2048         return;
2049     }
2050
2051     /* Remove remote device from the list of peers */
2052     found = false;
2053     list_for_each_entry(peer, &cm->peers, node) {
2054         if (peer->rdev == rdev) {
2055             riocm_debug(RDEV, "removing peer %s", rio_name(rdev));
2056             found = true;
2057             list_del(&peer->node);
2058             cm->npeers--;
2059             kfree(peer);
2060             break;
2061         }
2062     }
2063
2064     up_write(&rdev_sem);
2065
2066     if (!found)
2067         return;
2068
2069     /*
2070      * Release channels associated with this peer
2071      */
2072
2073     spin_lock_bh(&idr_lock);
2074     idr_for_each_entry(&ch_idr, ch, i) {
2075         if (ch && ch->rdev == rdev) {
2076             if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN)
2077                 riocm_exch(ch, RIO_CM_DISCONNECT);
2078             idr_remove(&ch_idr, ch->id);
2079             list_add(&ch->ch_node, &list);
2080         }
2081     }
2082     spin_unlock_bh(&idr_lock);
2083
2084     if (!list_empty(&list)) {
2085         list_for_each_entry_safe(ch, _c, &list, ch_node) {
2086             list_del(&ch->ch_node);
2087             riocm_ch_close(ch);
2088         }
2089     }
2090 }
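The peer list is guarded by rdev_sem as a reader-writer semaphore: lookups (connect, the list ioctls) take it for reading and may run concurrently, while add/remove above take it for writing and run exclusively. A pthread sketch of the same discipline; the list is reduced to a hypothetical counter:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t peers_lock = PTHREAD_RWLOCK_INITIALIZER;
static int npeers;

/* Writer side, like riocm_add_dev()/riocm_remove_dev(): exclusive access. */
static void peer_add(void)
{
    pthread_rwlock_wrlock(&peers_lock);
    npeers++;
    pthread_rwlock_unlock(&peers_lock);
}

/* Reader side, like cm_ep_get_list_size(): many readers may run at once. */
static int peer_count(void)
{
    int n;

    pthread_rwlock_rdlock(&peers_lock);
    n = npeers;
    pthread_rwlock_unlock(&peers_lock);
    return n;
}

int main(void)
{
    peer_add();
    peer_add();
    printf("peers=%d\n", peer_count());
    return 0;
}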
2091
2092 /*
2093  * riocm_cdev_add() - Create rio_cm char device
2094  * @devno: device number assigned to device (MAJ + MIN)
2095  */
2096 static int riocm_cdev_add(dev_t devno)
2097 {
2098     int ret;
2099
2100     cdev_init(&riocm_cdev.cdev, &riocm_cdev_fops);
2101     riocm_cdev.cdev.owner = THIS_MODULE;
2102     ret = cdev_add(&riocm_cdev.cdev, devno, 1);
2103     if (ret < 0) {
2104         riocm_error("Cannot register a device with error %d", ret);
2105         return ret;
2106     }
2107
2108     riocm_cdev.dev = device_create(dev_class, NULL, devno, NULL, DEV_NAME);
2109     if (IS_ERR(riocm_cdev.dev)) {
2110         cdev_del(&riocm_cdev.cdev);
2111         return PTR_ERR(riocm_cdev.dev);
2112     }
2113
2114     riocm_debug(MPORT, "Added %s cdev(%d:%d)",
2115                 DEV_NAME, MAJOR(devno), MINOR(devno));
2116
2117     return 0;
2118 }
2119
2120 /*
2121  * riocm_add_mport - add new local mport device into channel management core
2122  * @dev: device object associated with mport
2123  * @class_intf: class interface
2124  *
2125  * When a new mport device is added, CM immediately reserves inbound and
2126  * outbound RapidIO mailboxes that will be used.
2127  */
2128 static int riocm_add_mport(struct device *dev,
2129                            struct class_interface *class_intf)
2130 {
2131     int rc;
2132     int i;
2133     struct cm_dev *cm;
2134     struct rio_mport *mport = to_rio_mport(dev);
2135
2136     riocm_debug(MPORT, "add mport %s", mport->name);
2137
2138     cm = kzalloc(sizeof(*cm), GFP_KERNEL);
2139     if (!cm)
2140         return -ENOMEM;
2141
2142     cm->mport = mport;
2143
2144     rc = rio_request_outb_mbox(mport, cm, cmbox,
2145                                RIOCM_TX_RING_SIZE, riocm_outb_msg_event);
2146     if (rc) {
2147         riocm_error("failed to allocate OBMBOX_%d on %s",
2148                     cmbox, mport->name);
2149         kfree(cm);
2150         return -ENODEV;
2151     }
2152
2153     rc = rio_request_inb_mbox(mport, cm, cmbox,
2154                               RIOCM_RX_RING_SIZE, riocm_inb_msg_event);
2155     if (rc) {
2156         riocm_error("failed to allocate IBMBOX_%d on %s",
2157                     cmbox, mport->name);
2158         rio_release_outb_mbox(mport, cmbox);
2159         kfree(cm);
2160         return -ENODEV;
2161     }
2162
2163     /*
2164      * Allocate and register inbound messaging buffers to be ready
2165      * to receive channel and system management requests
2166      */
2167     for (i = 0; i < RIOCM_RX_RING_SIZE; i++)
2168         cm->rx_buf[i] = NULL;
2169
2170     cm->rx_slots = RIOCM_RX_RING_SIZE;
2171     mutex_init(&cm->rx_lock);
2172     riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
2173     cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
     if (!cm->rx_wq) { /* queue_work() on a NULL workqueue would oops */
         riocm_error("failed to allocate rx_wq for %s", mport->name);
         riocm_rx_free(cm);
         rio_release_inb_mbox(mport, cmbox);
         rio_release_outb_mbox(mport, cmbox);
         kfree(cm);
         return -ENOMEM;
     }
2174     INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
2175
2176     cm->tx_slot = 0;
2177     cm->tx_cnt = 0;
2178     cm->tx_ack_slot = 0;
2179     spin_lock_init(&cm->tx_lock);
2180
2181     INIT_LIST_HEAD(&cm->peers);
2182     cm->npeers = 0;
2183     INIT_LIST_HEAD(&cm->tx_reqs);
2184
2185     down_write(&rdev_sem);
2186     list_add_tail(&cm->list, &cm_dev_list);
2187     up_write(&rdev_sem);
2188
2189     return 0;
2190 }
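riocm_add_mport() above, like riocm_init() further down, releases already-acquired resources in reverse order when a later step fails; the rx_wq check follows the same unwind idea. A compact user-space sketch of the goto-unwind idiom with hypothetical resources:

#include <stdio.h>
#include <stdlib.h>

/* Acquire A, then B, then C; on any failure release what was taken, in
 * reverse order - the shape used by riocm_add_mport()/riocm_init(). */
static int setup(void)
{
    void *a = NULL, *b = NULL, *c = NULL;
    int rc = -1;

    a = malloc(16);
    if (!a)
        goto out;
    b = malloc(16);
    if (!b)
        goto err_a;
    c = malloc(16);
    if (!c)
        goto err_b;

    free(c); free(b); free(a);   /* success path for this demo */
    return 0;

err_b:
    free(b);
err_a:
    free(a);
out:
    return rc;
}

int main(void)
{
    printf("setup: %d\n", setup());
    return 0;
}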
2191
2192 /*
2193  * riocm_remove_mport - remove local mport device from channel management core
2194  * @dev: device object associated with mport
2195  * @class_intf: class interface
2196  *
2197  * Removes a local mport device from the list of registered devices that
2198  * provide channel management services. Returns silently if the specified
2199  * mport is not registered with the CM core.
2200  */
2201 static void riocm_remove_mport(struct device *dev,
2202                                struct class_interface *class_intf)
2203 {
2204     struct rio_mport *mport = to_rio_mport(dev);
2205     struct cm_dev *cm;
2206     struct cm_peer *peer, *temp;
2207     struct rio_channel *ch, *_c;
2208     unsigned int i;
2209     bool found = false;
2210     LIST_HEAD(list);
2211
2212     riocm_debug(MPORT, "%s", mport->name);
2213
2214     /* Find a matching cm_dev object */
2215     down_write(&rdev_sem);
2216     list_for_each_entry(cm, &cm_dev_list, list) {
2217         if (cm->mport == mport) {
2218             list_del(&cm->list);
2219             found = true;
2220             break;
2221         }
2222     }
2223     up_write(&rdev_sem);
2224     if (!found)
2225         return;
2226
2227     flush_workqueue(cm->rx_wq);
2228     destroy_workqueue(cm->rx_wq);
2229
2230     /* Release channels bound to this mport */
2231     spin_lock_bh(&idr_lock);
2232     idr_for_each_entry(&ch_idr, ch, i) {
2233         if (ch->cmdev == cm) {
2234             riocm_debug(RDEV, "%s drop ch_%d",
2235                         mport->name, ch->id);
2236             idr_remove(&ch_idr, ch->id);
2237             list_add(&ch->ch_node, &list);
2238         }
2239     }
2240     spin_unlock_bh(&idr_lock);
2241
2242     if (!list_empty(&list)) {
2243         list_for_each_entry_safe(ch, _c, &list, ch_node) {
2244             list_del(&ch->ch_node);
2245             riocm_ch_close(ch);
2246         }
2247     }
2248
2249     rio_release_inb_mbox(mport, cmbox);
2250     rio_release_outb_mbox(mport, cmbox);
2251
2252     /* Remove and free peer entries */
2253     if (!list_empty(&cm->peers))
2254         riocm_debug(RDEV, "ATTN: peer list not empty");
2255     list_for_each_entry_safe(peer, temp, &cm->peers, node) {
2256         riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev));
2257         list_del(&peer->node);
2258         kfree(peer);
2259     }
2260
2261     riocm_rx_free(cm);
2262     kfree(cm);
2263     riocm_debug(MPORT, "%s done", mport->name);
2264 }
2265
2266 static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
2267                            void *unused)
2268 {
2269     struct rio_channel *ch;
2270     unsigned int i;
2271
2272     riocm_debug(EXIT, ".");
2273
2274     spin_lock_bh(&idr_lock);
2275     idr_for_each_entry(&ch_idr, ch, i) {
2276         riocm_debug(EXIT, "close ch %d", ch->id);
2277         if (ch->state == RIO_CM_CONNECTED)
2278             riocm_send_close(ch);
2279     }
2280     spin_unlock_bh(&idr_lock);
2281
2282     return NOTIFY_DONE;
2283 }
2284
2285 /*
2286  * riocm_interface handles addition/removal of remote RapidIO devices
2287  */
2288 static struct subsys_interface riocm_interface = {
2289     .name       = "rio_cm",
2290     .subsys     = &rio_bus_type,
2291     .add_dev    = riocm_add_dev,
2292     .remove_dev = riocm_remove_dev,
2293 };
2294
2295 /*
2296  * rio_mport_interface handles addition/removal of local mport devices
2297  */
2298 static struct class_interface rio_mport_interface __refdata = {
2299     .class      = &rio_mport_class,
2300     .add_dev    = riocm_add_mport,
2301     .remove_dev = riocm_remove_mport,
2302 };
2303
2304 static struct notifier_block rio_cm_notifier = {
2305     .notifier_call = rio_cm_shutdown,
2306 };
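/* Review note on rio_cm_shutdown() above: it calls riocm_send_close() while
 * holding idr_lock (taken with spin_lock_bh), yet riocm_send_close() allocates
 * its packet with kzalloc(GFP_KERNEL), which may sleep. Sleeping is not
 * allowed in atomic context; a non-sleeping GFP_ATOMIC allocation, or issuing
 * the close notifications after dropping the lock, would avoid the hazard. */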
2307
2308 static int __init riocm_init(void)
2309 {
2310     int ret;
2311
2312     /* Create device class needed by udev */
2313     dev_class = class_create(THIS_MODULE, DRV_NAME);
2314     if (IS_ERR(dev_class)) {
2315         riocm_error("Cannot create " DRV_NAME " class");
2316         return PTR_ERR(dev_class);
2317     }
2318
2319     ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME);
2320     if (ret) {
2321         class_destroy(dev_class);
2322         return ret;
2323     }
2324
2325     dev_major = MAJOR(dev_number);
2326     dev_minor_base = MINOR(dev_number);
2327     riocm_debug(INIT, "Registered class with %d major", dev_major);
2328
2329     /*
2330      * Register as rapidio_port class interface to get notifications about
2331      * mport additions and removals.
2332      */
2333     ret = class_interface_register(&rio_mport_interface);
2334     if (ret) {
2335         riocm_error("class_interface_register error: %d", ret);
2336         goto err_reg;
2337     }
2338
2339     /*
2340      * Register as RapidIO bus interface to get notifications about
2341      * addition/removal of remote RapidIO devices.
2342      */
2343     ret = subsys_interface_register(&riocm_interface);
2344     if (ret) {
2345         riocm_error("subsys_interface_register error: %d", ret);
2346         goto err_cl;
2347     }
2348
2349     ret = register_reboot_notifier(&rio_cm_notifier);
2350     if (ret) {
2351         riocm_error("failed to register reboot notifier (err=%d)", ret);
2352         goto err_sif;
2353     }
2354
2355     ret = riocm_cdev_add(dev_number);
2356     if (ret) {
2357         unregister_reboot_notifier(&rio_cm_notifier);
2358         ret = -ENODEV;
2359         goto err_sif;
2360     }
2361
2362     return 0;
2363 err_sif:
2364     subsys_interface_unregister(&riocm_interface);
2365 err_cl:
2366     class_interface_unregister(&rio_mport_interface);
2367 err_reg:
2368     unregister_chrdev_region(dev_number, 1);
2369     class_destroy(dev_class);
2370     return ret;
2371 }
2372
2373 static void __exit riocm_exit(void)
2374 {
2375     riocm_debug(EXIT, "enter");
2376     unregister_reboot_notifier(&rio_cm_notifier);
2377     subsys_interface_unregister(&riocm_interface);
2378     class_interface_unregister(&rio_mport_interface);
2379     idr_destroy(&ch_idr);
2380
2381     device_unregister(riocm_cdev.dev);
2382     cdev_del(&(riocm_cdev.cdev));
2383
2384     class_destroy(dev_class);
2385     unregister_chrdev_region(dev_number, 1);
2386 }
2387
2388 late_initcall(riocm_init);
2389 module_exit(riocm_exit);
2390 2391 2392 2393 2394
2395 /* LDV_COMMENT_BEGIN_MAIN */
2396 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
2397
2398 /*###########################################################################*/
2399
2400 /*############## Driver Environment Generator 0.2 output ####################*/
2401
2402 /*###########################################################################*/
2403 2404 2405
2406 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
2407 void ldv_check_final_state(void);
2408
2409 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
2410 void ldv_check_return_value(int res);
2411
2412 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
2413 void ldv_check_return_value_probe(int res);
2414
2415 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
2416 void ldv_initialize(void);
2417
2418 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
2419 void ldv_handler_precall(void);
2420
2421 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary integer value. */
2422 int nondet_int(void);
2423
2424 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
2425 int LDV_IN_INTERRUPT;
2426
2427 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
2428 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
2429 2430 2431
2432 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
2433 /*============================= VARIABLE DECLARATION PART =============================*/
2434 /** STRUCT: struct type: file_operations, struct name: riocm_cdev_fops **/
2435 /* content: static int riocm_cdev_open(struct inode *inode, struct file *filp)*/
2436 /* LDV_COMMENT_BEGIN_PREP */
2437 #define DRV_NAME "rio_cm"
2438 #define DRV_VERSION "1.0.0"
2439 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
2440 #define DRV_DESC "RapidIO Channelized Messaging Driver"
2441 #define DEV_NAME "rio_cm"
2442 #ifdef DEBUG
2443 #define riocm_debug(level, fmt, arg...) \
2444 do { \
2445 if (DBG_##level & dbg_level) \
2446 pr_debug(DRV_NAME ": %s " fmt "\n", \
2447 __func__, ##arg); \
2448 } while (0)
2449 #else
2450 #define riocm_debug(level, fmt, arg...) \
2451 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
2452 #endif
2453 #define riocm_warn(fmt, arg...) \
2454 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
2455 #define riocm_error(fmt, arg...) \
2456 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
2457 #ifdef DEBUG
2458 #endif
2459 #define RIOCM_TX_RING_SIZE 128
2460 #define RIOCM_RX_RING_SIZE 128
2461 #define RIOCM_CONNECT_TO 3
2462 #define RIOCM_MAX_CHNUM 0xffff
2463 #define RIOCM_CHNUM_AUTO 0
2464 #define RIOCM_MAX_EP_COUNT 0x10000
2465 #define RIO_HDR_LETTER_MASK 0xffff0000
2466 #define RIO_HDR_MBOX_MASK 0x0000ffff
2467 #define is_msg_capable(src_ops, dst_ops) \
2468 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
2469 (dst_ops & RIO_DST_OPS_DATA_MSG))
2470 #define dev_cm_capable(dev) \
2471 is_msg_capable(dev->src_ops, dev->dst_ops)
2472 /* LDV_COMMENT_END_PREP */
2473 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_cdev_open" */
2474 struct inode * var_group1;
2475 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_cdev_open" */
2476 struct file * var_group2;
2477 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "riocm_cdev_open" */
2478 static int res_riocm_cdev_open_32;
2479 /* content: static int riocm_cdev_release(struct inode *inode, struct file *filp)*/
2480 /* LDV_COMMENT_BEGIN_PREP */
2516 /* LDV_COMMENT_END_PREP */
2517 /* content: static long riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)*/
2518 /* LDV_COMMENT_BEGIN_PREP */
2554 /* LDV_COMMENT_END_PREP */
2555 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_cdev_ioctl" */
2556 unsigned int var_riocm_cdev_ioctl_45_p1;
2557 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_cdev_ioctl" */
2558 unsigned long var_riocm_cdev_ioctl_45_p2;
2559
2560 /** STRUCT: struct type: subsys_interface, struct name: riocm_interface **/
2561 /* content: static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)*/
2562 /* LDV_COMMENT_BEGIN_PREP */
2598 /* LDV_COMMENT_END_PREP */
2599 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_add_dev" */
2600 struct device * var_group3;
2601 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "riocm_add_dev" */
2602 struct subsys_interface * var_group4;
2603 /* content: static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)*/
2604 /* LDV_COMMENT_BEGIN_PREP */
2640 /* LDV_COMMENT_END_PREP */
2641
2642 /** STRUCT: struct type: notifier_block, struct name: rio_cm_notifier **/
2643 /* content: static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code, void *unused)*/
2644 /* LDV_COMMENT_BEGIN_PREP */
2680 /* LDV_COMMENT_END_PREP */
2681 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "rio_cm_shutdown" */
2682 struct notifier_block * var_group5;
2683 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "rio_cm_shutdown" */
2684 unsigned long var_rio_cm_shutdown_51_p1;
2685 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "rio_cm_shutdown" */
2686 void * var_rio_cm_shutdown_51_p2;
2687 2688 2689 2690
2691 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
2692 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
2693 /*============================= VARIABLE INITIALIZING PART =============================*/
2694 LDV_IN_INTERRUPT=1;
2695 2696 2697 2698
2699 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
2700 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
2701 /*============================= FUNCTION CALL SECTION =============================*/
2702 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
2703 ldv_initialize();
2704 int ldv_s_riocm_cdev_fops_file_operations = 0;
2705 2706 2707 2708 2709 2710 2711
2712 while( nondet_int()
2713 || !(ldv_s_riocm_cdev_fops_file_operations == 0)
2714 ) {
2715
2716 switch(nondet_int()) {
2717
2718 case 0: {
2719
2720 /** STRUCT: struct type: file_operations, struct name: riocm_cdev_fops **/
2721 if(ldv_s_riocm_cdev_fops_file_operations==0) {
2722
2723 /* content: static int riocm_cdev_open(struct inode *inode, struct file *filp)*/
2724 /* LDV_COMMENT_BEGIN_PREP */
2760 /* LDV_COMMENT_END_PREP */
2761 /* LDV_COMMENT_FUNCTION_CALL Function from field "open" from driver structure with callbacks "riocm_cdev_fops". Standard function test for correct return result. */
2762 ldv_handler_precall();
2763 res_riocm_cdev_open_32 = riocm_cdev_open( var_group1, var_group2);
2764 ldv_check_return_value(res_riocm_cdev_open_32);
2765 if(res_riocm_cdev_open_32)
2766 goto ldv_module_exit;
2767 ldv_s_riocm_cdev_fops_file_operations++;
2768
2769 }
2770
2771 }
2772
2773 break;
2774 case 1: {
2775
2776 /** STRUCT: struct type: file_operations, struct name: riocm_cdev_fops **/
2777 if(ldv_s_riocm_cdev_fops_file_operations==1) {
2778
2779 /* content: static int riocm_cdev_release(struct inode *inode, struct file *filp)*/
2780 /* LDV_COMMENT_BEGIN_PREP */
2816 /* LDV_COMMENT_END_PREP */
2817 /* LDV_COMMENT_FUNCTION_CALL Function from field "release" from driver structure with callbacks "riocm_cdev_fops" */
2818 ldv_handler_precall();
2819 riocm_cdev_release( var_group1, var_group2);
2820 ldv_s_riocm_cdev_fops_file_operations=0;
2821
2822 }
2823
2824 }
2825
2826 break;
2827 case 2: {
2828
2829 /** STRUCT: struct type: file_operations, struct name: riocm_cdev_fops **/
2830 2831
2832 /* content: static long riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)*/
2833 /* LDV_COMMENT_BEGIN_PREP */
2869 /* LDV_COMMENT_END_PREP */
2870 /* LDV_COMMENT_FUNCTION_CALL Function from field "unlocked_ioctl" from driver structure with callbacks "riocm_cdev_fops" */
2871 ldv_handler_precall();
2872 riocm_cdev_ioctl( var_group2, var_riocm_cdev_ioctl_45_p1, var_riocm_cdev_ioctl_45_p2);
2873 2874 2875 2876
2877 }
2878
2879 break;
2880 case 3: {
2881
2882 /** STRUCT: struct type: subsys_interface, struct name: riocm_interface **/
2883 2884
2885 /* content: static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)*/
2886 /* LDV_COMMENT_BEGIN_PREP */
2922 /* LDV_COMMENT_END_PREP */
2923 /* LDV_COMMENT_FUNCTION_CALL Function from field "add_dev" from driver structure with callbacks "riocm_interface" */
2924 ldv_handler_precall();
2925 riocm_add_dev( var_group3, var_group4);
2926 2927 2928 2929
2930 }
2931
2932 break;
2933 case 4: {
2934
2935 /** STRUCT: struct type: subsys_interface, struct name: riocm_interface **/
2936 2937
2938 /* content: static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)*/
2939 /* LDV_COMMENT_BEGIN_PREP */
2975 /* LDV_COMMENT_END_PREP */
2976 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove_dev" from driver structure with callbacks "riocm_interface" */
2977 ldv_handler_precall();
2978 riocm_remove_dev( var_group3, var_group4);
2979 2980 2981 2982
2983 }
2984
2985 break;
\ 2954 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 2955 #endif 2956 #define riocm_warn(fmt, arg...) \ 2957 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 2958 #define riocm_error(fmt, arg...) \ 2959 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 2960 #ifdef DEBUG 2961 #endif 2962 #define RIOCM_TX_RING_SIZE 128 2963 #define RIOCM_RX_RING_SIZE 128 2964 #define RIOCM_CONNECT_TO 3 2965 #define RIOCM_MAX_CHNUM 0xffff 2966 #define RIOCM_CHNUM_AUTO 0 2967 #define RIOCM_MAX_EP_COUNT 0x10000 2968 #define RIO_HDR_LETTER_MASK 0xffff0000 2969 #define RIO_HDR_MBOX_MASK 0x0000ffff 2970 #define is_msg_capable(src_ops, dst_ops) \ 2971 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 2972 (dst_ops & RIO_DST_OPS_DATA_MSG)) 2973 #define dev_cm_capable(dev) \ 2974 is_msg_capable(dev->src_ops, dev->dst_ops) 2975 /* LDV_COMMENT_END_PREP */ 2976 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove_dev" from driver structure with callbacks "riocm_interface" */ 2977 ldv_handler_precall(); 2978 riocm_remove_dev( var_group3, var_group4); 2979 2980 2981 2982 2983 } 2984 2985 break; 2986 case 5: { 2987 2988 /** STRUCT: struct type: notifier_block, struct name: rio_cm_notifier **/ 2989 2990 2991 /* content: static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code, void *unused)*/ 2992 /* LDV_COMMENT_BEGIN_PREP */ 2993 #define DRV_NAME "rio_cm" 2994 #define DRV_VERSION "1.0.0" 2995 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 2996 #define DRV_DESC "RapidIO Channelized Messaging Driver" 2997 #define DEV_NAME "rio_cm" 2998 #ifdef DEBUG 2999 #define riocm_debug(level, fmt, arg...) \ 3000 do { \ 3001 if (DBG_##level & dbg_level) \ 3002 pr_debug(DRV_NAME ": %s " fmt "\n", \ 3003 __func__, ##arg); \ 3004 } while (0) 3005 #else 3006 #define riocm_debug(level, fmt, arg...) \ 3007 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 3008 #endif 3009 #define riocm_warn(fmt, arg...) \ 3010 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 3011 #define riocm_error(fmt, arg...) \ 3012 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 3013 #ifdef DEBUG 3014 #endif 3015 #define RIOCM_TX_RING_SIZE 128 3016 #define RIOCM_RX_RING_SIZE 128 3017 #define RIOCM_CONNECT_TO 3 3018 #define RIOCM_MAX_CHNUM 0xffff 3019 #define RIOCM_CHNUM_AUTO 0 3020 #define RIOCM_MAX_EP_COUNT 0x10000 3021 #define RIO_HDR_LETTER_MASK 0xffff0000 3022 #define RIO_HDR_MBOX_MASK 0x0000ffff 3023 #define is_msg_capable(src_ops, dst_ops) \ 3024 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 3025 (dst_ops & RIO_DST_OPS_DATA_MSG)) 3026 #define dev_cm_capable(dev) \ 3027 is_msg_capable(dev->src_ops, dev->dst_ops) 3028 /* LDV_COMMENT_END_PREP */ 3029 /* LDV_COMMENT_FUNCTION_CALL Function from field "notifier_call" from driver structure with callbacks "rio_cm_notifier" */ 3030 ldv_handler_precall(); 3031 rio_cm_shutdown( var_group5, var_rio_cm_shutdown_51_p1, var_rio_cm_shutdown_51_p2); 3032 3033 3034 3035 3036 } 3037 3038 break; 3039 default: break; 3040 3041 } 3042 3043 } 3044 3045 ldv_module_exit: 3046 3047 /** INIT: init_type: ST_MODULE_EXIT **/ 3048 /* content: static void __exit riocm_exit(void)*/ 3049 /* LDV_COMMENT_BEGIN_PREP */ 3050 #define DRV_NAME "rio_cm" 3051 #define DRV_VERSION "1.0.0" 3052 #define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>" 3053 #define DRV_DESC "RapidIO Channelized Messaging Driver" 3054 #define DEV_NAME "rio_cm" 3055 #ifdef DEBUG 3056 #define riocm_debug(level, fmt, arg...) 
\ 3057 do { \ 3058 if (DBG_##level & dbg_level) \ 3059 pr_debug(DRV_NAME ": %s " fmt "\n", \ 3060 __func__, ##arg); \ 3061 } while (0) 3062 #else 3063 #define riocm_debug(level, fmt, arg...) \ 3064 no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg) 3065 #endif 3066 #define riocm_warn(fmt, arg...) \ 3067 pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg) 3068 #define riocm_error(fmt, arg...) \ 3069 pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg) 3070 #ifdef DEBUG 3071 #endif 3072 #define RIOCM_TX_RING_SIZE 128 3073 #define RIOCM_RX_RING_SIZE 128 3074 #define RIOCM_CONNECT_TO 3 3075 #define RIOCM_MAX_CHNUM 0xffff 3076 #define RIOCM_CHNUM_AUTO 0 3077 #define RIOCM_MAX_EP_COUNT 0x10000 3078 #define RIO_HDR_LETTER_MASK 0xffff0000 3079 #define RIO_HDR_MBOX_MASK 0x0000ffff 3080 #define is_msg_capable(src_ops, dst_ops) \ 3081 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 3082 (dst_ops & RIO_DST_OPS_DATA_MSG)) 3083 #define dev_cm_capable(dev) \ 3084 is_msg_capable(dev->src_ops, dev->dst_ops) 3085 /* LDV_COMMENT_END_PREP */ 3086 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be uploaded from kernel. This function declared as "MODULE_EXIT(function name)". */ 3087 ldv_handler_precall(); 3088 riocm_exit(); 3089 3090 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */ 3091 ldv_final: ldv_check_final_state(); 3092 3093 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */ 3094 return; 3095 3096 } 3097 #endif 3098 3099 /* LDV_COMMENT_END_MAIN */ 3100 3101 #line 22 "/home/ubuntu/launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--43_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/7763/dscv_tempdir/dscv/ri/43_1a/drivers/rapidio/rio_cm.o.c.prepared"
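The generated main above follows the usual LDV environment-model shape: after module_init() succeeds, the verifier picks registered callbacks nondeterministically in a loop, then calls module_exit() and checks the final state. Below is a condensed sketch of that shape, not the generated code itself; the ldv_* primitives are those defined in the models further down, while my_init/my_open/my_release/my_exit are hypothetical stand-ins for the riocm_* handlers called above.

/* Condensed sketch of the LDV driver-callback loop (hypothetical my_* names). */
extern int ldv_undef_int(void);
extern void ldv_check_final_state(void);
extern int my_init(void);      /* stands for the module_init() handler */
extern void my_exit(void);     /* stands for the module_exit() handler */
extern int my_open(void);      /* stands for a .open callback          */
extern void my_release(void);  /* stands for a .release callback       */

void ldv_main_sketch(void)
{
	if (my_init())                     /* module_init() may fail        */
		goto ldv_module_exit;
	while (ldv_undef_int()) {          /* arbitrary sequence of events  */
		switch (ldv_undef_int()) { /* nondeterministic callback pick */
		case 0:
			if (my_open())     /* failed open ends the scenario */
				goto ldv_module_exit;
			break;
		case 1:
			my_release();
			break;
		default:
			break;
		}
	}
ldv_module_exit:
	my_exit();
	ldv_check_final_state();           /* all locks/resources released? */
}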
1
2
3 /* Here is the definition of the CHECK_WAIT_FLAGS(flags) macro. */
4 #include "include/gfp.h"
5 #include <linux/gfp.h>
6 #include <verifier/rcv.h>
7 #include <kernel-model/ERR.inc>
8
9 #define LDV_ZERO_STATE 0
10
11
12 /* There are 2 possible states of the spin lock. */
13 enum {
14 	LDV_SPIN_UNLOCKED = LDV_ZERO_STATE, /* Spin isn't locked. */
15 	LDV_SPIN_LOCKED /* Spin is locked. */
16 };
17
18
19 /* Spin isn't locked at the beginning. */
20 int ldv_spin = LDV_SPIN_UNLOCKED;
21
22
23 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_alloc_flags') Check that a memory allocating function was called with a correct value of flags under spin locking. */
24 void ldv_check_alloc_flags(gfp_t flags)
25 {
26 	/* LDV_COMMENT_ASSERT If spin is locked (ldv_spin != LDV_SPIN_UNLOCKED) then a memory allocating function should be called with the __GFP_WAIT flag unset (GFP_ATOMIC or GFP_NOWAIT). */
27 	ldv_assert(ldv_spin == LDV_SPIN_UNLOCKED || CHECK_WAIT_FLAGS(flags));
28 }
29
30 extern struct page *ldv_some_page(void);
31
32 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_alloc_flags_and_return_some_page') Check that a memory allocating function was called with a correct value of flags under spin locking. */
33 struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags)
34 {
35 	/* LDV_COMMENT_ASSERT If spin is locked (ldv_spin != LDV_SPIN_UNLOCKED) then a memory allocating function should be called with the __GFP_WAIT flag unset (GFP_ATOMIC or GFP_NOWAIT). */
36 	ldv_assert(ldv_spin == LDV_SPIN_UNLOCKED || CHECK_WAIT_FLAGS(flags));
37 	/* LDV_COMMENT_RETURN Return a page pointer (maybe NULL). */
38 	return ldv_some_page();
39 }
40
41 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_alloc_nonatomic') Check that a memory allocating function was not called under spin locking. */
42 void ldv_check_alloc_nonatomic(void)
43 {
44 	/* LDV_COMMENT_ASSERT If spin is locked (ldv_spin != LDV_SPIN_UNLOCKED) then this memory allocating function should not be called, because it implicitly uses the GFP_KERNEL flag. */
45 	ldv_assert(ldv_spin == LDV_SPIN_UNLOCKED);
46 }
47
48 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock') Lock spin. */
49 void ldv_spin_lock(void)
50 {
51 	/* LDV_COMMENT_CHANGE_STATE Lock spin. */
52 	ldv_spin = LDV_SPIN_LOCKED;
53 }
54
55 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock') Unlock spin. */
56 void ldv_spin_unlock(void)
57 {
58 	/* LDV_COMMENT_CHANGE_STATE Unlock spin. */
59 	ldv_spin = LDV_SPIN_UNLOCKED;
60 }
61
62 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock') Try to lock spin. It should return 0 if spin wasn't locked. */
63 int ldv_spin_trylock(void)
64 {
65 	int is_lock;
66
67 	/* LDV_COMMENT_OTHER Make a nondeterministic choice. */
68 	is_lock = ldv_undef_int();
69
70 	if (is_lock)
71 	{
72 		/* LDV_COMMENT_RETURN Don't lock spin and return 0. */
73 		return 0;
74 	}
75 	else
76 	{
77 		/* LDV_COMMENT_CHANGE_STATE Lock spin. */
78 		ldv_spin = LDV_SPIN_LOCKED;
79 		/* LDV_COMMENT_RETURN Return 1 since spin was locked. */
80 		return 1;
81 	}
82 }
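For orientation, here is a hedged sketch of the kind of driver code this model flags under rule 43_1a. It is illustrative and not taken from rio_cm: demo_lock and demo_alloc() are hypothetical names, and it assumes the usual LDV instrumentation in which spin_lock()/spin_unlock() are mapped to ldv_spin_lock()/ldv_spin_unlock() and allocation flags are routed to ldv_check_alloc_flags().

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);       /* hypothetical lock */

static void *demo_alloc(size_t size)
{
	void *buf;

	spin_lock(&demo_lock);           /* model: ldv_spin = LDV_SPIN_LOCKED   */
	buf = kmalloc(size, GFP_KERNEL); /* BAD: GFP_KERNEL may sleep, so
	                                  * ldv_check_alloc_flags() fails here  */
	/* buf = kmalloc(size, GFP_ATOMIC);  OK: __GFP_WAIT is unset            */
	spin_unlock(&demo_lock);         /* model: ldv_spin = LDV_SPIN_UNLOCKED */
	return buf;
}

Replacing GFP_KERNEL with GFP_ATOMIC (or moving the allocation outside the locked region) makes the assertion in ldv_check_alloc_flags() hold on every path.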
1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes the program to reach the error
5    label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because some static verifiers (like
9    BLAST) don't accept multiple error labels through a program. */
10 static inline void ldv_error(void)
11 {
12 	LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16    avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21 	LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return a nondeterministic negative integer number. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 	int ret = ldv_undef_int();
33
34 	ldv_assume(ret < 0);
35
36 	return ret;
37 }
38 /* Return a nondeterministic nonpositive integer number. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 	int ret = ldv_undef_int();
42
43 	ldv_assume(ret <= 0);
44
45 	return ret;
46 }
47
48 /* Add an explicit model for the __builtin_expect GCC function. Without the model a
49    return value will be treated as nondeterministic by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 	return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56    function by using a target-dependent mechanism (such as intentionally executing
57    an illegal instruction) or by calling abort. The mechanism used may vary from
58    release to release so you should not rely on any particular implementation.
59    http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62 	ldv_assert(0);
63 }
64
65 /* The constant is for simulating an error of the ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */
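These primitives are typically combined to model callees whose outcome the verifier should treat as unknown: ldv_undef_int() yields an unconstrained value and ldv_assume() restricts it to the plausible range, so the verifier explores both the success and the failure path. A small illustrative stub follows; model_register_thing and demo_probe are hypothetical names, not part of the LDV models above.

/* Hypothetical stub: a callee that either succeeds (0) or fails (-1). */
static int model_register_thing(void)
{
	int ret = ldv_undef_int();         /* unconstrained value           */

	ldv_assume(ret == 0 || ret == -1); /* keep only plausible outcomes  */
	return ret;
}

/* A caller is then verified along both outcomes. */
static int demo_probe(void)
{
	if (model_register_thing())        /* failure path                  */
		return ldv_undef_int_negative();
	return 0;                          /* success path                  */
}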
1 #ifndef __LINUX_SPINLOCK_H
2 #define __LINUX_SPINLOCK_H
3
4 /*
5  * include/linux/spinlock.h - generic spinlock/rwlock declarations
6  *
7  * here's the role of the various spinlock/rwlock related include files:
8  *
9  * on SMP builds:
10  *
11  *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
12  *                        initializers
13  *
14  *  linux/spinlock_types.h:
15  *                        defines the generic type and initializers
16  *
17  *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
18  *                        implementations, mostly inline assembly code
19  *
20  *   (also included on UP-debug builds:)
21  *
22  *  linux/spinlock_api_smp.h:
23  *                        contains the prototypes for the _spin_*() APIs.
24  *
25  *  linux/spinlock.h:     builds the final spin_*() APIs.
26  *
27  * on UP builds:
28  *
29  *  linux/spinlock_type_up.h:
30  *                        contains the generic, simplified UP spinlock type.
31  *                        (which is an empty structure on non-debug builds)
32  *
33  *  linux/spinlock_types.h:
34  *                        defines the generic type and initializers
35  *
36  *  linux/spinlock_up.h:
37  *                        contains the arch_spin_*()/etc. version of UP
38  *                        builds. (which are NOPs on non-debug, non-preempt
39  *                        builds)
40  *
41  *   (included on UP-non-debug builds:)
42  *
43  *  linux/spinlock_api_up.h:
44  *                        builds the _spin_*() APIs.
45  *
46  *  linux/spinlock.h:     builds the final spin_*() APIs.
47  */
48
49 #include <linux/typecheck.h>
50 #include <linux/preempt.h>
51 #include <linux/linkage.h>
52 #include <linux/compiler.h>
53 #include <linux/irqflags.h>
54 #include <linux/thread_info.h>
55 #include <linux/kernel.h>
56 #include <linux/stringify.h>
57 #include <linux/bottom_half.h>
58 #include <asm/barrier.h>
59
60
61 /*
62  * Must define these before including other files, inline functions need them
63  */
64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
65
66 #define LOCK_SECTION_START(extra) \
67 ".subsection 1\n\t" \
68 extra \
69 ".ifndef " LOCK_SECTION_NAME "\n\t" \
70 LOCK_SECTION_NAME ":\n\t" \
71 ".endif\n"
72
73 #define LOCK_SECTION_END \
74 ".previous\n\t"
75
76 #define __lockfunc __attribute__((section(".spinlock.text")))
77
78 /*
79  * Pull the arch_spinlock_t and arch_rwlock_t definitions:
80  */
81 #include <linux/spinlock_types.h>
82
83 /*
84  * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
85  */
86 #ifdef CONFIG_SMP
87 # include <asm/spinlock.h>
88 #else
89 # include <linux/spinlock_up.h>
90 #endif
91
92 #ifdef CONFIG_DEBUG_SPINLOCK
93 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
94 				 struct lock_class_key *key);
95 # define raw_spin_lock_init(lock) \
96 do { \
97 	static struct lock_class_key __key; \
98 \
99 	__raw_spin_lock_init((lock), #lock, &__key); \
100 } while (0)
101
102 #else
103 # define raw_spin_lock_init(lock) \
104 	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
105 #endif
106
107 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
108
109 #ifdef CONFIG_GENERIC_LOCKBREAK
110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
111 #else
112
113 #ifdef arch_spin_is_contended
114 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
115 #else
116 #define raw_spin_is_contended(lock) (((void)(lock), 0))
117 #endif /*arch_spin_is_contended*/
118 #endif
119
120 /*
121  * Despite its name it doesn't necessarily has to be a full barrier.
122  * It should only guarantee that a STORE before the critical section
123  * can not be reordered with LOADs and STOREs inside this section.
124  * spin_lock() is the one-way barrier, this LOAD can not escape out
125  * of the region. So the default implementation simply ensures that
126  * a STORE can not move into the critical section, smp_wmb() should
127  * serialize it with another STORE done by spin_lock().
128  */
129 #ifndef smp_mb__before_spinlock
130 #define smp_mb__before_spinlock()	smp_wmb()
131 #endif
132
133 /**
134  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
135  * @lock: the spinlock in question.
136  */
137 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
138
139 #ifdef CONFIG_DEBUG_SPINLOCK
140 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
141 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
142 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
143 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
144 #else
145 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
146 {
147 	__acquire(lock);
148 	arch_spin_lock(&lock->raw_lock);
149 }
150
151 static inline void
152 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
153 {
154 	__acquire(lock);
155 	arch_spin_lock_flags(&lock->raw_lock, *flags);
156 }
157
158 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
159 {
160 	return arch_spin_trylock(&(lock)->raw_lock);
161 }
162
163 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
164 {
165 	arch_spin_unlock(&lock->raw_lock);
166 	__release(lock);
167 }
168 #endif
169
170 /*
171  * Define the various spin_lock methods. Note we define these
172  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
173  * various methods are defined as nops in the case they are not
174  * required.
175  */
176 #define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
177
178 #define raw_spin_lock(lock)	_raw_spin_lock(lock)
179
180 #ifdef CONFIG_DEBUG_LOCK_ALLOC
181 # define raw_spin_lock_nested(lock, subclass) \
182 	_raw_spin_lock_nested(lock, subclass)
183 # define raw_spin_lock_bh_nested(lock, subclass) \
184 	_raw_spin_lock_bh_nested(lock, subclass)
185
186 # define raw_spin_lock_nest_lock(lock, nest_lock) \
187 	 do { \
188 		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
189 		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
190 	 } while (0)
191 #else
192 /*
193  * Always evaluate the 'subclass' argument to avoid that the compiler
194  * warns about set-but-not-used variables when building with
195  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
196  */
197 # define raw_spin_lock_nested(lock, subclass) \
198 	_raw_spin_lock(((void)(subclass), (lock)))
199 # define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
200 # define raw_spin_lock_bh_nested(lock, subclass)	_raw_spin_lock_bh(lock)
201 #endif
202
203 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
204
205 #define raw_spin_lock_irqsave(lock, flags) \
206 	do { \
207 		typecheck(unsigned long, flags); \
208 		flags = _raw_spin_lock_irqsave(lock); \
209 	} while (0)
210
211 #ifdef CONFIG_DEBUG_LOCK_ALLOC
212 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
213 	do { \
214 		typecheck(unsigned long, flags); \
215 		flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
216 	} while (0)
217 #else
218 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
219 	do { \
220 		typecheck(unsigned long, flags); \
221 		flags = _raw_spin_lock_irqsave(lock); \
222 	} while (0)
223 #endif
224
225 #else
226
227 #define raw_spin_lock_irqsave(lock, flags) \
228 	do { \
229 		typecheck(unsigned long, flags); \
230 		_raw_spin_lock_irqsave(lock, flags); \
231 	} while (0)
232
233 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
234 	raw_spin_lock_irqsave(lock, flags)
235
236 #endif
237
238 #define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
239 #define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
240 #define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
241 #define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
242
243 #define raw_spin_unlock_irqrestore(lock, flags) \
244 	do { \
245 		typecheck(unsigned long, flags); \
246 		_raw_spin_unlock_irqrestore(lock, flags); \
247 	} while (0)
248 #define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
249
250 #define raw_spin_trylock_bh(lock) \
251 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
252
253 #define raw_spin_trylock_irq(lock) \
254 ({ \
255 	local_irq_disable(); \
256 	raw_spin_trylock(lock) ? \
257 	1 : ({ local_irq_enable(); 0;  }); \
258 })
259
260 #define raw_spin_trylock_irqsave(lock, flags) \
261 ({ \
262 	local_irq_save(flags); \
263 	raw_spin_trylock(lock) ? \
264 	1 : ({ local_irq_restore(flags); 0; }); \
265 })
266
267 /**
268  * raw_spin_can_lock - would raw_spin_trylock() succeed?
269  * @lock: the spinlock in question.
270  */
271 #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
272
273 /* Include rwlock functions */
274 #include <linux/rwlock.h>
275
276 /*
277  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
278  */
279 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
280 # include <linux/spinlock_api_smp.h>
281 #else
282 # include <linux/spinlock_api_up.h>
283 #endif
284
285 /*
286  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
287  */
288
289 static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
290 {
291 	return &lock->rlock;
292 }
293
294 #define spin_lock_init(_lock)				\
295 do {							\
296 	spinlock_check(_lock);				\
297 	raw_spin_lock_init(&(_lock)->rlock);		\
298 } while (0)
299
300 static __always_inline void spin_lock(spinlock_t *lock)
301 {
302 	raw_spin_lock(&lock->rlock);
303 }
304
305 static __always_inline void spin_lock_bh(spinlock_t *lock)
306 {
307 	raw_spin_lock_bh(&lock->rlock);
308 }
309
310 static __always_inline int spin_trylock(spinlock_t *lock)
311 {
312 	return raw_spin_trylock(&lock->rlock);
313 }
314
315 #define spin_lock_nested(lock, subclass)			\
316 do {								\
317 	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
318 } while (0)
319
320 #define spin_lock_bh_nested(lock, subclass)			\
321 do {								\
322 	raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
323 } while (0)
324
325 #define spin_lock_nest_lock(lock, nest_lock)				\
326 do {									\
327 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
328 } while (0)
329
330 static __always_inline void spin_lock_irq(spinlock_t *lock)
331 {
332 	raw_spin_lock_irq(&lock->rlock);
333 }
334
335 #define spin_lock_irqsave(lock, flags)				\
336 do {								\
337 	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
338 } while (0)
339
340 #define spin_lock_irqsave_nested(lock, flags, subclass)			\
341 do {									\
342 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
343 } while (0)
344
345 static __always_inline void spin_unlock(spinlock_t *lock)
346 {
347 	raw_spin_unlock(&lock->rlock);
348 }
349
350 static __always_inline void spin_unlock_bh(spinlock_t *lock)
351 {
352 	raw_spin_unlock_bh(&lock->rlock);
353 }
354
355 static __always_inline void spin_unlock_irq(spinlock_t *lock)
356 {
357 	raw_spin_unlock_irq(&lock->rlock);
358 }
359
360 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
361 {
362 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
363 }
364
365 static __always_inline int spin_trylock_bh(spinlock_t *lock)
366 {
367 	return raw_spin_trylock_bh(&lock->rlock);
368 }
369
370 static __always_inline int spin_trylock_irq(spinlock_t *lock)
371 {
372 	return raw_spin_trylock_irq(&lock->rlock);
373 }
374
375 #define spin_trylock_irqsave(lock, flags)			\
376 ({								\
377 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
378 })
379
380 static __always_inline void spin_unlock_wait(spinlock_t *lock)
381 {
382 	raw_spin_unlock_wait(&lock->rlock);
383 }
384
385 static __always_inline int spin_is_locked(spinlock_t *lock)
386 {
387 	return raw_spin_is_locked(&lock->rlock);
388 }
389
390 static __always_inline int spin_is_contended(spinlock_t *lock)
391 {
392 	return raw_spin_is_contended(&lock->rlock);
393 }
394
395 static __always_inline int spin_can_lock(spinlock_t *lock)
396 {
397 	return raw_spin_can_lock(&lock->rlock);
398 }
399
400 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
401
402 /*
403  * Pull the atomic_t declaration:
404  * (asm-mips/atomic.h needs above definitions)
405  */
406 #include <linux/atomic.h>
407 /**
408  * atomic_dec_and_lock - lock on reaching reference count zero
409  * @atomic: the atomic counter
410  * @lock: the spinlock in question
411  *
412  * Decrements @atomic by 1.  If the result is 0, returns true and locks
413  * @lock.  Returns false for all other cases.
414  */
415 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
416 #define atomic_dec_and_lock(atomic, lock) \
417 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
418
419 #endif /* __LINUX_SPINLOCK_H */
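As a usage note on the API above: the irqsave variants are macros precisely because flags is written by name, so it must be a local unsigned long (the typecheck() in raw_spin_lock_irqsave enforces this). A minimal, self-contained sketch of the canonical pattern follows; counter_lock and counter_inc() are hypothetical names used only for illustration.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(counter_lock);  /* hypothetical lock  */
static unsigned long counter;          /* hypothetical state */

static void counter_inc(void)
{
	unsigned long flags;

	spin_lock_irqsave(&counter_lock, flags);      /* disables local IRQs,
	                                               * saves previous state */
	counter++;
	spin_unlock_irqrestore(&counter_lock, flags); /* restores IRQ state   */
}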

Here is an explanation of the rule violation that was detected while checking your driver against the corresponding kernel.

Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to determine whether your driver really contains an error.

The Error trace column shows a path along which the given rule is violated. You can expand or collapse entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand or collapse each particular entity by clicking +/-. Hovering over some entities displays tooltips. The error trace is also linked to the related source code: line numbers may be shown as links on the left, and clicking one opens the corresponding line in the source code.

The Source code column shows the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers and the rule model. Tabs indicate the currently opened file and the other available files. Hovering over a tab shows the full file name; clicking it displays that file's contents.

Kernel: linux-4.8-rc1.tar.xz
Module: drivers/rapidio/rio_cm.ko
Rule: 43_1a
Verifier: CPAchecker
Verdict: Bug
Status: Fixed
Creation time: 2016-09-09 23:37:19
Problem description: L0245

Comment

Reported: 9 Sep 2016

[Back to top]