Bug
Error #165
Error trace
{ 20 typedef unsigned char __u8; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 29 typedef long long __s64; 30 typedef unsigned long long __u64; 15 typedef signed char s8; 16 typedef unsigned char u8; 18 typedef short s16; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 280 struct kernel_symbol { unsigned long value; const char *name; } ; 34 struct module ; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 102 typedef __s32 int32_t; 108 typedef __u32 uint32_t; 133 typedef unsigned long sector_t; 134 typedef unsigned long blkcnt_t; 152 typedef u64 dma_addr_t; 157 typedef unsigned int gfp_t; 158 typedef unsigned int fmode_t; 161 typedef u64 phys_addr_t; 176 struct __anonstruct_atomic_t_6 { int counter; } ; 176 typedef struct __anonstruct_atomic_t_6 atomic_t; 181 struct __anonstruct_atomic64_t_7 { long counter; } ; 181 typedef struct __anonstruct_atomic64_t_7 atomic64_t; 182 struct list_head { struct list_head *next; struct list_head *prev; } ; 187 struct hlist_node ; 187 struct hlist_head { struct hlist_node *first; } ; 191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 115 typedef void (*ctor_fn_t)(); 274 struct _ddebug { const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno; unsigned char flags; } ; 58 struct device ; 465 struct file_operations ; 477 struct completion ; 478 struct pt_regs ; 27 union __anonunion___u_9 { struct list_head *__val; char __c[1U]; } ; 65 union __anonunion___u_11 { struct list_head *__val; char __c[1U]; } ; 105 union __anonunion___u_13 { struct list_head *__val; char __c[1U]; } ; 546 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 114 struct timespec ; 115 struct compat_timespec ; 116 struct pollfd ; 117 struct __anonstruct_futex_27 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ; 117 struct __anonstruct_nanosleep_28 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ; 117 struct __anonstruct_poll_29 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ; 117 union __anonunion____missing_field_name_26 { struct __anonstruct_futex_27 futex; struct __anonstruct_nanosleep_28 nanosleep; struct __anonstruct_poll_29 poll; } ; 117 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_26 __annonCompField4; } ; 50 struct task_struct ; 39 
struct page ; 26 struct mm_struct ; 288 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ; 66 struct __anonstruct____missing_field_name_32 { unsigned int a; unsigned int b; } ; 66 struct __anonstruct____missing_field_name_33 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 66 union __anonunion____missing_field_name_31 { struct __anonstruct____missing_field_name_32 __annonCompField5; struct __anonstruct____missing_field_name_33 __annonCompField6; } ; 66 struct desc_struct { union __anonunion____missing_field_name_31 __annonCompField7; } ; 13 typedef unsigned long pteval_t; 14 typedef unsigned long pmdval_t; 16 typedef unsigned long pgdval_t; 17 typedef unsigned long pgprotval_t; 19 struct __anonstruct_pte_t_34 { pteval_t pte; } ; 19 typedef struct __anonstruct_pte_t_34 pte_t; 21 struct pgprot { pgprotval_t pgprot; } ; 256 typedef struct pgprot pgprot_t; 258 struct __anonstruct_pgd_t_35 { pgdval_t pgd; } ; 258 typedef struct __anonstruct_pgd_t_35 pgd_t; 297 struct __anonstruct_pmd_t_37 { pmdval_t pmd; } ; 297 typedef struct __anonstruct_pmd_t_37 pmd_t; 423 typedef struct page *pgtable_t; 434 struct file ; 445 struct seq_file ; 481 struct thread_struct ; 483 struct cpumask ; 20 struct qspinlock { atomic_t val; } ; 33 typedef struct qspinlock arch_spinlock_t; 34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ; 14 typedef struct qrwlock arch_rwlock_t; 247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ; 83 struct static_key { atomic_t enabled; } ; 23 typedef atomic64_t atomic_long_t; 359 struct cpumask { unsigned long bits[128U]; } ; 15 typedef struct cpumask cpumask_t; 654 typedef struct cpumask *cpumask_var_t; 22 struct tracepoint_func { void *func; void *data; int prio; } ; 28 struct tracepoint { const char *name; struct static_key key; int (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ; 233 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 26 struct __anonstruct____missing_field_name_61 { u64 rip; u64 rdp; } ; 26 struct __anonstruct____missing_field_name_62 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 26 union __anonunion____missing_field_name_60 { struct __anonstruct____missing_field_name_61 __annonCompField13; struct __anonstruct____missing_field_name_62 __annonCompField14; } ; 26 union __anonunion____missing_field_name_63 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_60 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_63 __annonCompField16; } ; 66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ; 227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ; 
233 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ; 254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ; 271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; union fpregs_state state; } ; 180 struct seq_operations ; 386 struct perf_event ; 391 struct __anonstruct_mm_segment_t_75 { unsigned long seg; } ; 391 typedef struct __anonstruct_mm_segment_t_75 mm_segment_t; 392 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; u32 status; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ; 48 struct thread_info { unsigned long flags; } ; 33 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 28 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ; 593 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct____missing_field_name_77 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion____missing_field_name_76 { struct raw_spinlock rlock; struct __anonstruct____missing_field_name_77 __annonCompField19; } ; 33 struct spinlock { union __anonunion____missing_field_name_76 __annonCompField20; } ; 76 typedef struct spinlock spinlock_t; 23 struct __anonstruct_rwlock_t_78 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 23 typedef struct __anonstruct_rwlock_t_78 rwlock_t; 416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ; 52 typedef struct seqcount seqcount_t; 407 struct __anonstruct_seqlock_t_93 { struct seqcount seqcount; spinlock_t lock; } ; 407 typedef struct __anonstruct_seqlock_t_93 seqlock_t; 601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 7 typedef __s64 time64_t; 83 struct user_namespace ; 
22 struct __anonstruct_kuid_t_94 { uid_t val; } ; 22 typedef struct __anonstruct_kuid_t_94 kuid_t; 27 struct __anonstruct_kgid_t_95 { gid_t val; } ; 27 typedef struct __anonstruct_kgid_t_95 kgid_t; 139 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ; 36 struct vm_area_struct ; 38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 43 typedef struct __wait_queue_head wait_queue_head_t; 97 struct __anonstruct_nodemask_t_96 { unsigned long bits[16U]; } ; 97 typedef struct __anonstruct_nodemask_t_96 nodemask_t; 249 typedef unsigned int isolate_mode_t; 13 struct optimistic_spin_queue { atomic_t tail; } ; 39 struct mutex { atomic_long_t owner; spinlock_t wait_lock; struct optimistic_spin_queue osq; struct list_head wait_list; void *magic; struct lockdep_map dep_map; } ; 70 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ; 222 struct rw_semaphore ; 223 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ; 178 struct completion { unsigned int done; wait_queue_head_t wait; } ; 28 typedef s64 ktime_t; 1145 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ; 254 struct hrtimer ; 255 enum hrtimer_restart ; 256 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 41 struct rb_root { struct rb_node *rb_node; } ; 835 struct nsproxy ; 278 struct workqueue_struct ; 279 struct work_struct ; 54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 335 struct wakeup_source ; 336 struct wake_irq ; 337 struct pm_domain_data ; 338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ; 556 
struct dev_pm_qos ; 556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool in_dpm_list; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; unsigned int links_count; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ; 618 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ; 38 struct ldt_struct ; 38 struct vdso_image ; 38 struct __anonstruct_mm_context_t_167 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; u16 pkey_allocation_map; s16 execute_only_pkey; void *bd_addr; } ; 38 typedef struct __anonstruct_mm_context_t_167 mm_context_t; 1264 struct llist_node ; 64 struct llist_node { struct llist_node *next; } ; 37 struct cred ; 19 struct inode ; 58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 73 struct __anonstruct____missing_field_name_215 { struct arch_uprobe_task autask; unsigned long vaddr; } ; 73 struct __anonstruct____missing_field_name_216 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 73 union __anonunion____missing_field_name_214 { struct __anonstruct____missing_field_name_215 __annonCompField35; struct __anonstruct____missing_field_name_216 __annonCompField36; } ; 73 struct uprobe ; 73 struct return_instance ; 73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_214 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 95 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct return_instance *next; } ; 111 struct xol_area ; 112 struct uprobes_state { struct xol_area *xol_area; } ; 151 struct address_space ; 152 struct mem_cgroup ; 153 union __anonunion____missing_field_name_217 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ; 153 union __anonunion____missing_field_name_218 { unsigned long index; void *freelist; } ; 153 struct __anonstruct____missing_field_name_222 { unsigned short inuse; unsigned short objects; unsigned 
char frozen; } ; 153 union __anonunion____missing_field_name_221 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_222 __annonCompField40; int units; } ; 153 struct __anonstruct____missing_field_name_220 { union __anonunion____missing_field_name_221 __annonCompField41; atomic_t _refcount; } ; 153 union __anonunion____missing_field_name_219 { unsigned long counters; struct __anonstruct____missing_field_name_220 __annonCompField42; } ; 153 struct dev_pagemap ; 153 struct __anonstruct____missing_field_name_224 { struct page *next; int pages; int pobjects; } ; 153 struct __anonstruct____missing_field_name_225 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ; 153 struct __anonstruct____missing_field_name_226 { unsigned long __pad; pgtable_t pmd_huge_pte; } ; 153 union __anonunion____missing_field_name_223 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_224 __annonCompField44; struct callback_head callback_head; struct __anonstruct____missing_field_name_225 __annonCompField45; struct __anonstruct____missing_field_name_226 __annonCompField46; } ; 153 struct kmem_cache ; 153 union __anonunion____missing_field_name_227 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ; 153 struct page { unsigned long flags; union __anonunion____missing_field_name_217 __annonCompField38; union __anonunion____missing_field_name_218 __annonCompField39; union __anonunion____missing_field_name_219 __annonCompField43; union __anonunion____missing_field_name_223 __annonCompField47; union __anonunion____missing_field_name_227 __annonCompField48; struct mem_cgroup *mem_cgroup; } ; 197 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 282 struct userfaultfd_ctx ; 282 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ; 289 struct __anonstruct_shared_228 { struct rb_node rb; unsigned long rb_subtree_last; } ; 289 struct anon_vma ; 289 struct vm_operations_struct ; 289 struct mempolicy ; 289 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_228 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ; 362 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 367 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 381 struct task_rss_stat { int events; int count[4U]; } ; 389 struct mm_rss_stat { atomic_long_t count[4U]; } ; 394 struct kioctx_table ; 395 struct linux_binfmt ; 395 struct mmu_notifier_mm ; 395 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; 
unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct user_namespace *user_ns; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ; 560 struct vm_fault ; 614 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 18 typedef __u64 Elf64_Off; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ; 235 typedef struct elf64_hdr Elf64_Ehdr; 314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ; 326 typedef struct elf64_shdr Elf64_Shdr; 53 union __anonunion____missing_field_name_233 { unsigned long bitmap[1U]; struct callback_head callback_head; } ; 53 struct idr_layer { int prefix; int layer; struct idr_layer *ary[64U]; int count; union __anonunion____missing_field_name_233 __annonCompField49; } ; 40 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ; 149 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ; 192 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ; 229 struct dentry ; 230 struct iattr ; 231 struct super_block ; 232 struct file_system_type ; 233 struct kernfs_open_node ; 234 struct kernfs_iattrs ; 257 struct kernfs_root ; 257 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 85 struct kernfs_node ; 85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 89 struct kernfs_ops ; 89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ; 96 union __anonunion____missing_field_name_242 { struct 
kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_242 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ; 157 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 173 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ; 191 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 511 struct sock ; 512 struct kobject ; 513 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 519 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 59 struct bin_attribute ; 60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 530 struct kref { atomic_t refcount; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned 
char uevent_suppress; } ; 115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 223 struct kernel_param ; 228 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 62 struct kparam_string ; 62 struct kparam_array ; 62 union __anonunion____missing_field_name_245 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_245 __annonCompField51; } ; 83 struct kparam_string { unsigned int maxlen; char *string; } ; 89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ; 470 struct exception_table_entry ; 24 struct latch_tree_node { struct rb_node node[2U]; } ; 211 struct mod_arch_specific { } ; 39 struct module_param_attrs ; 39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 50 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 277 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 284 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ; 291 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ; 307 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ; 321 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ; 329 struct module_sect_attrs ; 329 struct module_notes_attrs ; 329 struct trace_event_call ; 329 struct trace_enum_map ; 329 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const 
unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned long taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 799 struct klist_node ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 93 struct hlist_bl_node ; 93 struct hlist_bl_head { struct hlist_bl_node *first; } ; 36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ; 114 struct __anonstruct____missing_field_name_299 { spinlock_t lock; int count; } ; 114 union __anonunion____missing_field_name_298 { struct __anonstruct____missing_field_name_299 __annonCompField52; } ; 114 struct lockref { union __anonunion____missing_field_name_298 __annonCompField53; } ; 77 struct path ; 78 struct vfsmount ; 79 struct __anonstruct____missing_field_name_301 { u32 hash; u32 len; } ; 79 union __anonunion____missing_field_name_300 { struct __anonstruct____missing_field_name_301 __annonCompField54; u64 hash_len; } ; 79 struct qstr { union __anonunion____missing_field_name_300 __annonCompField55; const unsigned char *name; } ; 65 struct dentry_operations ; 65 union __anonunion____missing_field_name_302 { struct list_head d_lru; wait_queue_head_t *d_wait; } ; 65 union __anonunion_d_u_303 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ; 65 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_302 __annonCompField56; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_303 d_u; } ; 121 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(const struct path *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ; 592 struct path { struct vfsmount 
*mnt; struct dentry *dentry; } ; 19 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ; 27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ; 80 struct list_lru_one { struct list_head list; long nr_items; } ; 32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ; 37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ; 47 struct list_lru { struct list_lru_node *node; struct list_head list; } ; 63 union __anonunion____missing_field_name_304 { struct list_head private_list; struct callback_head callback_head; } ; 63 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned char count; unsigned char exceptional; struct radix_tree_node *parent; void *private_data; union __anonunion____missing_field_name_304 __annonCompField57; void *slots[64U]; unsigned long tags[3U][1U]; } ; 105 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 519 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; 526 struct pid_namespace ; 526 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ; 38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; 44 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ; 50 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ; 66 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *read_count; struct rw_semaphore rw_sem; wait_queue_head_t writer; int readers_block; } ; 144 struct delayed_call { void (*fn)(void *); void *arg; } ; 282 struct backing_dev_info ; 283 struct bdi_writeback ; 285 struct export_operations ; 288 struct kiocb ; 289 struct pipe_inode_info ; 290 struct poll_table_struct ; 291 struct kstatfs ; 292 struct swap_info_struct ; 293 struct iov_iter ; 294 struct fscrypt_info ; 295 struct fscrypt_operations ; 76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ; 213 struct dquot ; 214 struct kqid ; 19 typedef __kernel_uid32_t projid_t; 23 struct __anonstruct_kprojid_t_308 { projid_t val; } ; 23 typedef struct __anonstruct_kprojid_t_308 kprojid_t; 181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; 66 typedef long long qsize_t; 67 union __anonunion____missing_field_name_309 { kuid_t uid; kgid_t gid; kprojid_t projid; } ; 67 struct kqid { union __anonunion____missing_field_name_309 __annonCompField58; enum quota_type type; } ; 194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t 
dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ; 216 struct quota_format_type ; 217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ; 282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ; 309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ; 361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ; 407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ; 418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ; 431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, const struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ; 447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ; 511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ; 540 struct 
writeback_control ; 541 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ; 317 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ; 376 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; gfp_t gfp_mask; struct list_head private_list; void *private_data; } ; 398 struct request_queue ; 399 struct hd_struct ; 399 struct gendisk ; 399 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ; 514 struct posix_acl ; 541 struct inode_operations ; 541 union __anonunion____missing_field_name_314 { const unsigned int i_nlink; unsigned int __i_nlink; } ; 541 union __anonunion____missing_field_name_315 { struct hlist_head i_dentry; struct callback_head i_rcu; } ; 541 struct file_lock_context ; 541 struct cdev ; 541 union __anonunion____missing_field_name_316 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ; 541 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_314 __annonCompField59; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; 
spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_315 __annonCompField60; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_316 __annonCompField61; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ; 797 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ; 805 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ; 828 union __anonunion_f_u_317 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ; 828 struct file { union __anonunion_f_u_317 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ; 913 typedef void *fl_owner_t; 914 struct file_lock ; 915 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ; 921 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ; 948 struct nlm_lockowner ; 949 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ; 14 struct nfs4_lock_state ; 15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ; 19 struct fasync_struct ; 19 struct __anonstruct_afs_319 { struct list_head link; int state; } ; 19 union __anonunion_fl_u_318 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_319 afs; } ; 19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_318 fl_u; } ; 1001 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; 
struct list_head flc_lease; } ; 1068 struct files_struct ; 1221 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ; 1256 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ; 1286 struct super_operations ; 1286 struct xattr_handler ; 1286 struct mtd_info ; 1286 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ; 1570 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ; 1583 struct dir_context ; 1608 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ; 1615 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t 
(*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ; 1683 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ; 1753 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ; 1995 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module 
*owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ; 3167 struct assoc_array_ptr ; 3167 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ; 31 typedef int32_t key_serial_t; 34 typedef uint32_t key_perm_t; 35 struct key ; 36 struct user_struct ; 37 struct signal_struct ; 38 struct key_type ; 42 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ; 91 union key_payload { void *rcu_data0; void *data[4U]; } ; 128 union __anonunion____missing_field_name_320 { struct list_head graveyard_link; struct rb_node serial_node; } ; 128 struct key_user ; 128 union __anonunion____missing_field_name_321 { time_t expiry; time_t revoked_at; } ; 128 struct __anonstruct____missing_field_name_323 { struct key_type *type; char *description; } ; 128 union __anonunion____missing_field_name_322 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_323 __annonCompField64; } ; 128 struct __anonstruct____missing_field_name_325 { struct list_head name_link; struct assoc_array keys; } ; 128 union __anonunion____missing_field_name_324 { union key_payload payload; struct __anonstruct____missing_field_name_325 __annonCompField66; int reject_error; } ; 128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_320 __annonCompField62; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_321 __annonCompField63; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_322 __annonCompField65; union __anonunion____missing_field_name_324 __annonCompField67; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ; 377 struct audit_context ; 27 struct group_info { atomic_t usage; int ngroups; kgid_t gid[0U]; } ; 85 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ; 368 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ; 30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ; 222 struct pinctrl ; 223 struct pinctrl_state ; 194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ; 84 struct 
plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ; 4 typedef unsigned long cputime_t; 26 struct sem_undo_list ; 26 struct sysv_sem { struct sem_undo_list *undo_list; } ; 26 struct sysv_shm { struct list_head shm_clist; } ; 24 struct __anonstruct_sigset_t_326 { unsigned long sig[1U]; } ; 24 typedef struct __anonstruct_sigset_t_326 sigset_t; 25 struct siginfo ; 17 typedef void __signalfn_t(int); 18 typedef __signalfn_t *__sighandler_t; 20 typedef void __restorefn_t(); 21 typedef __restorefn_t *__sigrestore_t; 38 union sigval { int sival_int; void *sival_ptr; } ; 10 typedef union sigval sigval_t; 11 struct __anonstruct__kill_328 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ; 11 struct __anonstruct__timer_329 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ; 11 struct __anonstruct__rt_330 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ; 11 struct __anonstruct__sigchld_331 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ; 11 struct __anonstruct__addr_bnd_334 { void *_lower; void *_upper; } ; 11 union __anonunion____missing_field_name_333 { struct __anonstruct__addr_bnd_334 _addr_bnd; __u32 _pkey; } ; 11 struct __anonstruct__sigfault_332 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_333 __annonCompField68; } ; 11 struct __anonstruct__sigpoll_335 { long _band; int _fd; } ; 11 struct __anonstruct__sigsys_336 { void *_call_addr; int _syscall; unsigned int _arch; } ; 11 union __anonunion__sifields_327 { int _pad[28U]; struct __anonstruct__kill_328 _kill; struct __anonstruct__timer_329 _timer; struct __anonstruct__rt_330 _rt; struct __anonstruct__sigchld_331 _sigchld; struct __anonstruct__sigfault_332 _sigfault; struct __anonstruct__sigpoll_335 _sigpoll; struct __anonstruct__sigsys_336 _sigsys; } ; 11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_327 _sifields; } ; 118 typedef struct siginfo siginfo_t; 22 struct sigpending { struct list_head list; sigset_t signal; } ; 274 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ; 288 struct k_sigaction { struct sigaction sa; } ; 43 struct seccomp_filter ; 44 struct seccomp { int mode; struct seccomp_filter *filter; } ; 40 struct rt_mutex_waiter ; 41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ; 11 struct timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; } ; 125 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ; 158 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; 
unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ; 17 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ; 45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ; 41 struct percpu_ref ; 55 typedef void percpu_ref_func_t(struct percpu_ref *); 68 struct percpu_ref { atomic_long_t count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ; 607 struct cgroup ; 14 struct bpf_prog ; 14 struct cgroup_bpf { struct bpf_prog *prog[3U]; struct bpf_prog *effective[3U]; } ; 44 struct cgroup_root ; 45 struct cgroup_subsys ; 46 struct cgroup_taskset ; 90 struct cgroup_file { struct kernfs_node *kn; } ; 91 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ; 142 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; } ; 222 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; struct cgroup_bpf bpf; int ancestor_ids[]; } ; 310 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ; 349 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ; 434 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int 
(*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ; 128 struct futex_pi_state ; 129 struct robust_list_head ; 130 struct bio_list ; 131 struct fs_struct ; 132 struct perf_event_context ; 133 struct blk_plug ; 134 struct nameidata ; 188 struct cfs_rq ; 189 struct task_group ; 515 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ; 563 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ; 571 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ; 578 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ; 603 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ; 619 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ; 641 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ; 686 struct autogroup ; 687 struct tty_struct ; 687 struct taskstats ; 687 struct tty_audit_buf ; 687 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short 
oom_score_adj_min; struct mm_struct *oom_mm; struct mutex cred_guard_mutex; } ; 863 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ; 908 struct reclaim_state ; 909 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ; 924 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ; 981 struct wake_q_node { struct wake_q_node *next; } ; 1226 struct io_context ; 1261 struct load_weight { unsigned long weight; u32 inv_weight; } ; 1269 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ; 1327 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ; 1362 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ; 1399 struct rt_rq ; 1399 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ; 1417 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ; 1481 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ; 1500 struct sched_class ; 1500 struct compat_robust_list_head ; 1500 struct numa_group ; 1500 struct kcov ; 1500 struct task_struct { struct thread_info thread_info; volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t 
cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *ptracer_cred; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; int closid; struct robust_list_head 
*robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; atomic_t stack_refcount; struct thread_struct thread; } ; 76 struct dma_map_ops ; 76 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ; 24 struct device_private ; 25 struct device_driver ; 26 struct driver_private ; 27 struct class ; 28 struct subsys_private ; 29 struct bus_type ; 30 struct device_node ; 31 struct fwnode_handle ; 32 struct iommu_ops ; 33 struct iommu_group ; 34 struct iommu_fwspec ; 62 struct device_attribute ; 62 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ; 143 struct device_type ; 202 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; 208 struct of_device_id ; 208 struct acpi_device_id ; 208 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ; 358 struct class_attribute ; 358 struct class { const char 
*name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **class_groups; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ; 453 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ; 530 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ; 551 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ; 723 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ; 786 enum dl_dev_state { DL_DEV_NO_DRIVER = 0, DL_DEV_PROBING = 1, DL_DEV_DRIVER_BOUND = 2, DL_DEV_UNBINDING = 3 } ; 793 struct dev_links_info { struct list_head suppliers; struct list_head consumers; enum dl_dev_state status; } ; 813 struct irq_domain ; 813 struct dma_coherent_mem ; 813 struct cma ; 813 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_links_info links; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; struct iommu_fwspec *iommu_fwspec; bool offline_disabled; bool offline; } ; 971 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ; 60 struct exception_table_entry { int insn; int fixup; int handler; } ; 103 struct pollfd { int fd; short events; short revents; } ; 32 struct poll_table_struct { void (*_qproc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); unsigned long _key; } ; 652 struct cdev { struct kobject kobj; struct module *owner; const struct file_operations *ops; 
struct list_head list; dev_t dev; unsigned int count; } ; 25 struct __anonstruct_uuid_le_377 { __u8 b[16U]; } ; 25 typedef struct __anonstruct_uuid_le_377 uuid_le; 13 typedef unsigned long kernel_ulong_t; 187 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ; 230 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 675 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_ACPI_DATA = 3, FWNODE_ACPI_STATIC = 4, FWNODE_PDATA = 5, FWNODE_IRQCHIP = 6 } ; 685 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ; 32 typedef u32 phandle; 34 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ; 44 struct device_node { const char *name; const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; } ; 65 struct of_phandle_args { struct device_node *np; int args_count; uint32_t args[16U]; } ; 273 struct vm_fault { struct vm_area_struct *vma; unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; unsigned long address; pmd_t *pmd; pte_t orig_pte; struct page *cow_page; struct mem_cgroup *memcg; struct page *page; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ; 322 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct vm_fault *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ; 2439 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ; 21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ; 406 struct trace_enum_map { const char *system; const char *enum_string; unsigned long enum_value; } ; 96 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; 158 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void 
(*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); dma_addr_t (*map_resource)(struct device *, phys_addr_t , size_t , enum dma_data_direction , unsigned long); void (*unmap_resource)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ; 160 struct iommu_domain ; 47 struct iommu_domain_geometry { dma_addr_t aperture_start; dma_addr_t aperture_end; bool force_aperture; } ; 54 struct iommu_domain { unsigned int type; const struct iommu_ops *ops; unsigned long pgsize_bitmap; int (*handler)(struct iommu_domain *, struct device *, unsigned long, int, void *); void *handler_token; struct iommu_domain_geometry geometry; void *iova_cookie; } ; 88 enum iommu_cap { IOMMU_CAP_CACHE_COHERENCY = 0, IOMMU_CAP_INTR_REMAP = 1, IOMMU_CAP_NOEXEC = 2 } ; 94 enum iommu_attr { DOMAIN_ATTR_GEOMETRY = 0, DOMAIN_ATTR_PAGING = 1, DOMAIN_ATTR_WINDOWS = 2, DOMAIN_ATTR_FSL_PAMU_STASH = 3, DOMAIN_ATTR_FSL_PAMU_ENABLE = 4, DOMAIN_ATTR_FSL_PAMUV1 = 5, DOMAIN_ATTR_NESTING = 6, DOMAIN_ATTR_MAX = 7 } ; 105 struct iommu_dm_region { struct list_head list; phys_addr_t start; size_t length; int prot; } ; 133 struct iommu_ops { bool (*capable)(enum iommu_cap ); struct iommu_domain * (*domain_alloc)(unsigned int); void (*domain_free)(struct iommu_domain *); int (*attach_dev)(struct iommu_domain *, struct device *); void (*detach_dev)(struct iommu_domain *, struct device *); int (*map)(struct iommu_domain *, unsigned long, phys_addr_t , size_t , int); size_t (*unmap)(struct iommu_domain *, unsigned long, size_t ); size_t (*map_sg)(struct iommu_domain *, unsigned long, struct scatterlist *, unsigned int, int); phys_addr_t (*iova_to_phys)(struct iommu_domain *, dma_addr_t ); int (*add_device)(struct device *); void (*remove_device)(struct device *); struct iommu_group * (*device_group)(struct device *); int (*domain_get_attr)(struct iommu_domain *, enum iommu_attr , void *); int (*domain_set_attr)(struct iommu_domain *, enum iommu_attr , void *); void (*get_dm_regions)(struct device *, struct list_head *); void (*put_dm_regions)(struct device *, struct list_head *); void (*apply_dm_region)(struct device *, struct iommu_domain *, struct iommu_dm_region *); int (*domain_window_enable)(struct iommu_domain *, u32 , phys_addr_t , u64 , int); void (*domain_window_disable)(struct iommu_domain *, u32 ); int (*domain_set_windows)(struct iommu_domain *, u32 ); u32 (*domain_get_windows)(struct iommu_domain *); int (*of_xlate)(struct device *, struct of_phandle_args *); unsigned long pgsize_bitmap; } ; 334 struct iommu_fwspec { const struct iommu_ops *ops; struct fwnode_handle *iommu_fwnode; void *iommu_priv; unsigned int num_ids; u32 ids[1U]; } ; 139 struct vfio_device_info { __u32 argsz; __u32 flags; __u32 num_regions; __u32 num_irqs; } ; 204 struct vfio_region_info { __u32 argsz; __u32 flags; __u32 index; __u32 cap_offset; __u64 size; __u64 offset; } ; 292 struct vfio_irq_info { __u32 argsz; __u32 flags; __u32 index; __u32 count; } ; 343 
struct vfio_irq_set { __u32 argsz; __u32 flags; __u32 index; __u32 start; __u32 count; __u8 data[]; } ; 174 struct eventfd_ctx ; 88 struct parent_ops ; 88 struct parent_device { struct device *dev; const struct parent_ops *ops; struct kref ref; struct mutex lock; struct list_head next; struct kset *mdev_types_kset; struct list_head type_list; } ; 28 struct mdev_device { struct device dev; struct parent_device *parent; uuid_le uuid; void *driver_data; struct kref ref; struct list_head next; struct kobject *type_kobj; } ; 41 struct parent_ops { struct module *owner; const struct attribute_group **dev_attr_groups; const struct attribute_group **mdev_attr_groups; struct attribute_group **supported_type_groups; int (*create)(struct kobject *, struct mdev_device *); int (*remove)(struct mdev_device *); int (*open)(struct mdev_device *); void (*release)(struct mdev_device *); ssize_t (*read)(struct mdev_device *, char *, size_t , loff_t *); ssize_t (*write)(struct mdev_device *, const char *, size_t , loff_t *); ssize_t (*ioctl)(struct mdev_device *, unsigned int, unsigned long); int (*mmap)(struct mdev_device *, struct vm_area_struct *); } ; 110 struct mdev_type_attribute { struct attribute attr; ssize_t (*show)(struct kobject *, struct device *, char *); ssize_t (*store)(struct kobject *, struct device *, const char *, size_t ); } ; 41 struct mtty_dev { dev_t vd_devt; struct class *vd_class; struct cdev vd_cdev; struct idr vd_idr; struct device dev; } ; 83 struct mdev_region_info { u64 start; u64 phys_start; u32 size; u64 vfio_offset; } ; 90 struct rxtx { u8 fifo[16U]; u8 head; u8 tail; u8 count; } ; 121 struct serial_port { u8 uart_reg[8U]; struct rxtx rxtx; bool dlab; bool overrun; u16 divisor; u8 fcr; u8 max_fifo_size; u8 intr_trigger_level; } ; 132 struct mdev_state { int irq_fd; struct eventfd_ctx *intx_evtfd; struct eventfd_ctx *msi_evtfd; int irq_index; u8 *vconfig; struct mutex ops_lock; struct mdev_device *mdev; struct mdev_region_info region_info[9U]; u32 bar_mask[9U]; struct list_head next; struct serial_port s[2U]; struct mutex rxtx_lock; struct vfio_device_info dev_info; int nr_ports; } ; 67 typedef int ldv_func_ret_type___0; 1 void * __builtin_memcpy(void *, const void *, unsigned long); 1 unsigned long int __builtin_object_size(void *, int); 1 long int __builtin_expect(long, long); 266 void __write_once_size(volatile void *p, void *res, int size); 34 extern struct module __this_module; 178 int printk(const char *, ...); 63 void __dynamic_dev_dbg(struct _ddebug *, const struct device *, const char *, ...); 259 void __might_fault(const char *, int); 413 int sprintf(char *, const char *, ...); 416 int snprintf(char *, size_t , const char *, ...); 3 bool ldv_is_err(const void *ptr); 6 long int ldv_ptr_err(const void *ptr); 11 void * ldv_create_class(); 13 void ldv_unregister_class(); 16 int ldv_register_chrdev_region(); 17 void ldv_unregister_chrdev_region(); 25 void INIT_LIST_HEAD(struct list_head *list); 32 bool __list_add_valid(struct list_head *, struct list_head *, struct list_head *); 35 bool __list_del_entry_valid(struct list_head *); 55 void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next); 76 void list_add(struct list_head *new, struct list_head *head); 102 void __list_del(struct list_head *prev, struct list_head *next); 114 void __list_del_entry(struct list_head *entry); 122 void list_del(struct list_head *entry); 66 void warn_slowpath_fmt(const char *, const int, const char *, ...); 12 void * memdup_user(const void *, size_t ); 32 void * 
__memcpy(void *, const void *, size_t ); 57 void * __memset(void *, int, size_t ); 63 int memcmp(const void *, const void *, size_t ); 67 int strcmp(const char *, const char *); 32 long int PTR_ERR(const void *ptr); 41 bool IS_ERR(const void *ptr); 89 void __check_object_size(const void *, unsigned long, bool ); 92 void check_object_size(const void *ptr, unsigned long n, bool to_user); 130 void __mutex_init(struct mutex *, const char *, struct lock_class_key *); 152 void mutex_lock_nested(struct mutex *, unsigned int); 188 void mutex_unlock(struct mutex *); 113 void idr_destroy(struct idr *); 114 void idr_init(struct idr *); 87 const char * kobject_name(const struct kobject *kobj); 2427 int alloc_chrdev_region(dev_t *, unsigned int, unsigned int, const char *); 2430 int ldv_alloc_chrdev_region_7(dev_t *ldv_func_arg1, unsigned int ldv_func_arg2, unsigned int ldv_func_arg3, const char *ldv_func_arg4); 2438 void unregister_chrdev_region(dev_t , unsigned int); 2441 void ldv_unregister_chrdev_region_9(dev_t ldv_func_arg1, unsigned int ldv_func_arg2); 2445 void ldv_unregister_chrdev_region_10(dev_t ldv_func_arg1, unsigned int ldv_func_arg2); 522 void class_destroy(struct class *); 525 void ldv_class_destroy_8(struct class *ldv_func_arg1); 529 void ldv_class_destroy_11(struct class *ldv_func_arg1); 974 const char * dev_name(const struct device *dev); 984 int dev_set_name(struct device *, const char *, ...); 1112 int device_register(struct device *); 1113 void device_unregister(struct device *); 1223 const char * dev_driver_string(const struct device *); 5 void kasan_check_read(const void *, unsigned int); 6 void kasan_check_write(const void *, unsigned int); 678 unsigned long int _copy_from_user(void *, const void *, unsigned int); 680 unsigned long int _copy_to_user(void *, const void *, unsigned int); 686 void copy_user_overflow(int size, unsigned long count); 692 unsigned long int copy_from_user(void *to, const void *from, unsigned long n); 712 unsigned long int copy_to_user(void *to, const void *from, unsigned long n); 154 void kfree(const void *); 330 void * __kmalloc(size_t , gfp_t ); 478 void * kmalloc(size_t size, gfp_t flags); 634 void * kzalloc(size_t size, gfp_t flags); 21 void cdev_init(struct cdev *, const struct file_operations *); 27 int cdev_add(struct cdev *, dev_t , unsigned int); 29 void cdev_del(struct cdev *); 27 int uuid_le_cmp(const uuid_le u1, const uuid_le u2); 148 int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *, int, int, size_t *); 147 void * mdev_get_drvdata(struct mdev_device *mdev); 152 void mdev_set_drvdata(struct mdev_device *mdev, void *data); 161 int mdev_register_device(struct device *, const struct parent_ops *); 163 void mdev_unregister_device(struct device *); 34 void eventfd_ctx_put(struct eventfd_ctx *); 36 struct eventfd_ctx * eventfd_ctx_fdget(int); 38 __u64 eventfd_signal(struct eventfd_ctx *, __u64 ); 82 struct mtty_dev mtty_dev = { }; 151 struct mutex mdev_list_lock = { }; 152 struct list_head mdev_devices_list = { }; 154 const struct file_operations vd_fops = { &__this_module, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 160 int mtty_trigger_interrupt(uuid_le uuid); 163 struct mdev_state * find_mdev_state_by_uuid(uuid_le uuid); 175 void dump_buffer(char *buf, uint32_t count); 189 void mtty_create_config_space(struct mdev_state *mdev_state); 253 void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset, char *buf, u32 count); 307 void handle_bar_write(unsigned int index, struct 
mdev_state *mdev_state, u16 offset, char *buf, u32 count); 475 void handle_bar_read(unsigned int index, struct mdev_state *mdev_state, u16 offset, char *buf, u32 count); 613 void mdev_read_base(struct mdev_state *mdev_state); 650 ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count, loff_t pos, bool is_write); 727 int mtty_create(struct kobject *kobj, struct mdev_device *mdev); 777 int mtty_remove(struct mdev_device *mdev); 799 int mtty_reset(struct mdev_device *mdev); 815 ssize_t mtty_read(struct mdev_device *mdev, char *buf, size_t count, loff_t *ppos); 874 ssize_t mtty_write(struct mdev_device *mdev, const char *buf, size_t count, loff_t *ppos); 931 int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags, unsigned int index, unsigned int start, unsigned int count, void *data); 1067 int mtty_get_region_info(struct mdev_device *mdev, struct vfio_region_info *region_info, u16 *cap_type_id, void **cap_type); 1113 int mtty_get_irq_info(struct mdev_device *mdev, struct vfio_irq_info *irq_info); 1137 int mtty_get_device_info(struct mdev_device *mdev, struct vfio_device_info *dev_info); 1147 long int mtty_ioctl(struct mdev_device *mdev, unsigned int cmd, unsigned long arg); 1263 int mtty_open(struct mdev_device *mdev); 1269 void mtty_close(struct mdev_device *mdev); 1275 ssize_t sample_mtty_dev_show(struct device *dev, struct device_attribute *attr, char *buf); 1281 struct device_attribute dev_attr_sample_mtty_dev = { { "sample_mtty_dev", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &sample_mtty_dev_show, 0 }; 1283 struct attribute *mtty_dev_attrs[2U] = { &(dev_attr_sample_mtty_dev.attr), (struct attribute *)0 }; 1288 const struct attribute_group mtty_dev_group = { "mtty_dev", 0, 0, (struct attribute **)(&mtty_dev_attrs), 0 }; 1293 const struct attribute_group *mtty_dev_groups[2U] = { &mtty_dev_group, (const struct attribute_group *)0 }; 1299 ssize_t sample_mdev_dev_show(struct device *dev, struct device_attribute *attr, char *buf); 1310 struct device_attribute dev_attr_sample_mdev_dev = { { "sample_mdev_dev", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &sample_mdev_dev_show, 0 }; 1312 struct attribute *mdev_dev_attrs[2U] = { &(dev_attr_sample_mdev_dev.attr), (struct attribute *)0 }; 1317 const struct attribute_group mdev_dev_group = { "vendor", 0, 0, (struct attribute **)(&mdev_dev_attrs), 0 }; 1322 const struct attribute_group *mdev_dev_groups[2U] = { &mdev_dev_group, (const struct attribute_group *)0 }; 1328 ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf); 1344 struct mdev_type_attribute mdev_type_attr_name = { { "name", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &name_show, 0 }; 1347 ssize_t available_instances_show(struct kobject *kobj, struct device *dev, char *buf); 1372 struct mdev_type_attribute mdev_type_attr_available_instances = { { "available_instances", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &available_instances_show, 0 }; 1375 ssize_t device_api_show(struct kobject *kobj, struct device *dev, char *buf); 1381 struct mdev_type_attribute mdev_type_attr_device_api = { { "device_api", 292U, (_Bool)0, 0, { { { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 } } } }, &device_api_show, 0 }; 1383 struct attribute *mdev_types_attrs[4U] = { &(mdev_type_attr_name.attr), &(mdev_type_attr_device_api.attr), &(mdev_type_attr_available_instances.attr), (struct attribute *)0 }; 1390 
struct attribute_group mdev_type_group1 = { "1", 0, 0, (struct attribute **)(&mdev_types_attrs), 0 }; 1395 struct attribute_group mdev_type_group2 = { "2", 0, 0, (struct attribute **)(&mdev_types_attrs), 0 }; 1400 struct attribute_group *mdev_type_groups[3U] = { &mdev_type_group1, &mdev_type_group2, (struct attribute_group *)0 }; 1406 struct parent_ops mdev_fops = { &__this_module, (const struct attribute_group **)(&mtty_dev_groups), (const struct attribute_group **)(&mdev_dev_groups), (struct attribute_group **)(&mdev_type_groups), &mtty_create, &mtty_remove, &mtty_open, &mtty_close, &mtty_read, &mtty_write, &mtty_ioctl, 0 }; 1420 void mtty_device_release(struct device *dev); 1425 int mtty_dev_init(); 1484 void mtty_dev_exit(); 1522 void ldv_check_final_state(); 1525 void ldv_check_return_value(int); 1531 void ldv_initialize(); 1534 void ldv_handler_precall(); 1537 int nondet_int(); 1540 int LDV_IN_INTERRUPT = 0; 1543 void ldv_main0_sequence_infinite_withcheck_stateful(); 10 void ldv_error(); 20 void ldv_stop(); 25 int ldv_undef_int(); 26 void * ldv_undef_ptr(); 39 int ldv_undef_int_nonpositive(); 14 void * ldv_err_ptr(long error); 28 bool ldv_is_err_or_null(const void *ptr); 28 int ldv_usb_gadget_class = 0; 31 int ldv_usb_gadget_chrdev = 0; 34 int ldv_usb_gadget = 0; 61 int ldv_register_class(); 95 int ldv_register_chrdev(int major); 158 int ldv_register_usb_gadget(); 179 void ldv_unregister_usb_gadget(); return ; } { 1545 struct kobject *var_group1; 1546 struct mdev_device *var_group2; 1547 int res_mtty_open_19; 1548 char *var_mtty_read_11_p1; 1549 unsigned long var_mtty_read_11_p2; 1550 loff_t *var_mtty_read_11_p3; 1551 const char *var_mtty_write_12_p1; 1552 unsigned long var_mtty_write_12_p2; 1553 loff_t *var_mtty_write_12_p3; 1554 unsigned int var_mtty_ioctl_18_p1; 1555 unsigned long var_mtty_ioctl_18_p2; 1556 int ldv_s_mdev_fops_parent_ops; 1557 int tmp; 1558 int tmp___0; 1559 int tmp___1; 1971 ldv_s_mdev_fops_parent_ops = 0; 1909 LDV_IN_INTERRUPT = 1; 1918 ldv_initialize() { /* Function call is skipped due to function is undefined */} 1968 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { 1427 int ret; 1428 void *tmp; 1429 _Bool tmp___0; 1430 int tmp___1; 1431 struct lock_class_key __key; 1427 ret = 0; 1429 printk("\016mtty_dev: %s\n", "mtty_dev_init") { /* Function call is skipped due to function is undefined */} 1431 __memset((void *)(&mtty_dev), 0, 1928UL) { /* Function call is skipped due to function is undefined */} 1433 idr_init(&(mtty_dev.vd_idr)) { /* Function call is skipped due to function is undefined */} { 68 int ldv_func_res; 69 int tmp; 70 int tmp___0; 69 tmp = alloc_chrdev_region(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4) { /* Function call is skipped due to function is undefined */} 69 ldv_func_res = tmp; { } 126 int is_reg; { 41 int ret; 42 int tmp; 41 tmp = ldv_undef_int() { /* Function call is skipped due to function is undefined */} 41 ret = tmp; } 139 ldv_usb_gadget_chrdev = 1; } 1442 cdev_init(&(mtty_dev.vd_cdev), &vd_fops) { /* Function call is skipped due to function is undefined */} 1443 cdev_add(&(mtty_dev.vd_cdev), mtty_dev.vd_devt, 1048575U) { /* Function call is skipped due to function is undefined */} 1445 printk("\016major_number:%d\n", (mtty_dev.vd_devt) >> 20) { /* Function call is skipped due to function is undefined */} { 40 void *is_got; 43 is_got = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} } 1447 mtty_dev.vd_class = (struct class *)tmp; 1450 
printk("\vError: failed to register mtty_dev class\n") { /* Function call is skipped due to function is undefined */} 1451 goto failed1; 1477 cdev_del(&(mtty_dev.vd_cdev)) { /* Function call is skipped due to function is undefined */} { 87 unregister_chrdev_region(ldv_func_arg1, ldv_func_arg2) { /* Function call is skipped due to function is undefined */} } 1479 all_done:; } 1975 goto ldv_37281; 1975 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */} 1983 ldv_module_exit:; 2475 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { } 1486 mtty_dev.dev.bus = (struct bus_type *)0; 1487 mdev_unregister_device(&(mtty_dev.dev)) { /* Function call is skipped due to function is undefined */} 1489 device_unregister(&(mtty_dev.dev)) { /* Function call is skipped due to function is undefined */} 1490 idr_destroy(&(mtty_dev.vd_idr)) { /* Function call is skipped due to function is undefined */} 1491 cdev_del(&(mtty_dev.vd_cdev)) { /* Function call is skipped due to function is undefined */} { } 95 unregister_chrdev_region(ldv_func_arg1, ldv_func_arg2) { /* Function call is skipped due to function is undefined */} { }} | Source code 1
2 /*
3 * Mediated virtual PCI serial host device driver
4 *
5 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
6 * Author: Neo Jia <cjia@nvidia.com>
7 * Kirti Wankhede <kwankhede@nvidia.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * Sample driver that creates an mdev device that simulates a serial port
14 * over a PCI card.
15 *
16 */
17
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/device.h>
21 #include <linux/kernel.h>
22 #include <linux/fs.h>
23 #include <linux/poll.h>
24 #include <linux/slab.h>
25 #include <linux/cdev.h>
26 #include <linux/sched.h>
27 #include <linux/wait.h>
28 #include <linux/uuid.h>
29 #include <linux/vfio.h>
30 #include <linux/iommu.h>
31 #include <linux/sysfs.h>
32 #include <linux/ctype.h>
33 #include <linux/file.h>
34 #include <linux/mdev.h>
35 #include <linux/pci.h>
36 #include <linux/serial.h>
37 #include <uapi/linux/serial_reg.h>
38 #include <linux/eventfd.h>
39 /*
40 * #defines
41 */
42
43 #define VERSION_STRING "0.1"
44 #define DRIVER_AUTHOR "NVIDIA Corporation"
45
46 #define MTTY_CLASS_NAME "mtty"
47
48 #define MTTY_NAME "mtty"
49
50 #define MTTY_STRING_LEN 16
51
52 #define MTTY_CONFIG_SPACE_SIZE 0xff
53 #define MTTY_IO_BAR_SIZE 0x8
54 #define MTTY_MMIO_BAR_SIZE 0x100000
55
56 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
57 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
58
59 #define MAX_FIFO_SIZE 16
60
61 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
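/*
 * Worked example of the wrap-around above: MAX_FIFO_SIZE is 16, a power of
 * two, so the AND with (MAX_FIFO_SIZE - 1) masks the index back into 0..15,
 * e.g. idx = 15 advances to (15 + 1) & 15 = 0.
 */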
62
63 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
64
65 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
66 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
67 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
68 #define MTTY_VFIO_PCI_OFFSET_MASK \
69 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
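/*
 * Worked example of the offset encoding above: region index 1 maps to file
 * offset (u64)1 << 40 = 0x10000000000; MTTY_VFIO_PCI_OFFSET_TO_INDEX()
 * recovers the index from the top bits, and MTTY_VFIO_PCI_OFFSET_MASK
 * ((1ULL << 40) - 1 = 0xffffffffff) keeps the byte offset within the region.
 */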
70 #define MAX_MTTYS 24
71
72 /*
73 * Global Structures
74 */
75
76 struct mtty_dev {
77 dev_t vd_devt;
78 struct class *vd_class;
79 struct cdev vd_cdev;
80 struct idr vd_idr;
81 struct device dev;
82 } mtty_dev;
83
84 struct mdev_region_info {
85 u64 start;
86 u64 phys_start;
87 u32 size;
88 u64 vfio_offset;
89 };
90
91 #if defined(DEBUG_REGS)
92 const char *wr_reg[] = {
93 "TX",
94 "IER",
95 "FCR",
96 "LCR",
97 "MCR",
98 "LSR",
99 "MSR",
100 "SCR"
101 };
102
103 const char *rd_reg[] = {
104 "RX",
105 "IER",
106 "IIR",
107 "LCR",
108 "MCR",
109 "LSR",
110 "MSR",
111 "SCR"
112 };
113 #endif
114
115 /* loop back buffer */
116 struct rxtx {
117 u8 fifo[MAX_FIFO_SIZE];
118 u8 head, tail;
119 u8 count;
120 };
121
122 struct serial_port {
123 u8 uart_reg[8]; /* 8 registers */
124 struct rxtx rxtx; /* loop back buffer */
125 bool dlab;
126 bool overrun;
127 u16 divisor;
128 u8 fcr; /* FIFO control register */
129 u8 max_fifo_size;
130 u8 intr_trigger_level; /* interrupt trigger level */
131 };
132
133 /* State of each mdev device */
134 struct mdev_state {
135 int irq_fd;
136 struct eventfd_ctx *intx_evtfd;
137 struct eventfd_ctx *msi_evtfd;
138 int irq_index;
139 u8 *vconfig;
140 struct mutex ops_lock;
141 struct mdev_device *mdev;
142 struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
143 u32 bar_mask[VFIO_PCI_NUM_REGIONS];
144 struct list_head next;
145 struct serial_port s[2];
146 struct mutex rxtx_lock;
147 struct vfio_device_info dev_info;
148 int nr_ports;
149 };
150
151 struct mutex mdev_list_lock;
152 struct list_head mdev_devices_list;
153
154 static const struct file_operations vd_fops = {
155 .owner = THIS_MODULE,
156 };
157
158 /* function prototypes */
159
160 static int mtty_trigger_interrupt(uuid_le uuid);
161
162 /* Helper functions */
163 static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid)
164 {
165 struct mdev_state *mds;
166
167 list_for_each_entry(mds, &mdev_devices_list, next) {
168 if (uuid_le_cmp(mds->mdev->uuid, uuid) == 0)
169 return mds;
170 }
171
172 return NULL;
173 }
174
175 void dump_buffer(char *buf, uint32_t count)
176 {
177 #if defined(DEBUG)
178 int i;
179
180 pr_info("Buffer:\n");
181 for (i = 0; i < count; i++) {
182 pr_info("%2x ", *(buf + i));
183 if ((i + 1) % 16 == 0)
184 pr_info("\n");
185 }
186 #endif
187 }
188
189 static void mtty_create_config_space(struct mdev_state *mdev_state)
190 {
191 /* PCI dev ID */
192 STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);
193
194 /* Control: I/O+, Mem-, BusMaster- */
195 STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);
196
197 /* Status: capabilities list absent */
198 STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);
199
200 /* Rev ID */
201 mdev_state->vconfig[0x8] = 0x10;
202
203 /* programming interface class : 16550-compatible serial controller */
204 mdev_state->vconfig[0x9] = 0x02;
205
206 /* Sub class : 00 */
207 mdev_state->vconfig[0xa] = 0x00;
208
209 /* Base class : Simple Communication controllers */
210 mdev_state->vconfig[0xb] = 0x07;
211
212 /* base address registers */
213 /* BAR0: IO space */
214 STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
215 mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;
216
217 if (mdev_state->nr_ports == 2) {
218 /* BAR1: IO space */
219 STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
220 mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
221 }
222
223 /* Subsystem ID */
224 STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);
225
226 mdev_state->vconfig[0x34] = 0x00; /* Cap Ptr */
227 mdev_state->vconfig[0x3d] = 0x01; /* interrupt pin (INTA#) */
228
229 /* Vendor specific data */
230 mdev_state->vconfig[0x40] = 0x23;
231 mdev_state->vconfig[0x43] = 0x80;
232 mdev_state->vconfig[0x44] = 0x23;
233 mdev_state->vconfig[0x48] = 0x23;
234 mdev_state->vconfig[0x4c] = 0x23;
235
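/* bytes 0x60-0x6e below are the ASCII string "PCI Serial/UART" */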
236 mdev_state->vconfig[0x60] = 0x50;
237 mdev_state->vconfig[0x61] = 0x43;
238 mdev_state->vconfig[0x62] = 0x49;
239 mdev_state->vconfig[0x63] = 0x20;
240 mdev_state->vconfig[0x64] = 0x53;
241 mdev_state->vconfig[0x65] = 0x65;
242 mdev_state->vconfig[0x66] = 0x72;
243 mdev_state->vconfig[0x67] = 0x69;
244 mdev_state->vconfig[0x68] = 0x61;
245 mdev_state->vconfig[0x69] = 0x6c;
246 mdev_state->vconfig[0x6a] = 0x2f;
247 mdev_state->vconfig[0x6b] = 0x55;
248 mdev_state->vconfig[0x6c] = 0x41;
249 mdev_state->vconfig[0x6d] = 0x52;
250 mdev_state->vconfig[0x6e] = 0x54;
251 }
252
253 static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
254 char *buf, u32 count)
255 {
256 u32 cfg_addr, bar_mask, bar_index = 0;
257
258 switch (offset) {
259 case 0x04: /* device control */
260 case 0x06: /* device status */
261 /* do nothing */
262 break;
263 case 0x3c: /* interrupt line */
264 mdev_state->vconfig[0x3c] = buf[0];
265 break;
266 case 0x3d:
267 /*
268 * Interrupt Pin is hardwired to INTA.
269 * This field is write protected by hardware
270 */
271 break;
272 case 0x10: /* BAR0 */
273 case 0x14: /* BAR1 */
274 if (offset == 0x10)
275 bar_index = 0;
276 else if (offset == 0x14)
277 bar_index = 1;
278
279 if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
280 STORE_LE32(&mdev_state->vconfig[offset], 0);
281 break;
282 }
283
284 cfg_addr = *(u32 *)buf;
285 pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);
286
287 if (cfg_addr == 0xffffffff) {
288 bar_mask = mdev_state->bar_mask[bar_index];
289 cfg_addr = (cfg_addr & bar_mask);
290 }
291
292 cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
293 STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
294 break;
295 case 0x18: /* BAR2 */
296 case 0x1c: /* BAR3 */
297 case 0x20: /* BAR4 */
298 STORE_LE32(&mdev_state->vconfig[offset], 0);
299 break;
300 default:
301 pr_info("PCI config write @0x%x of %d bytes not handled\n",
302 offset, count);
303 break;
304 }
305 }
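/*
 * Worked example of the BAR sizing path above: MTTY_IO_BAR_SIZE is 0x8, so
 * bar_mask = ~0x8 + 1 = 0xfffffff8.  A guest write of 0xffffffff to BAR0 is
 * stored as (0xffffffff & 0xfffffff8) | 0x1 (the I/O-space bit preserved from
 * vconfig) = 0xfffffff9; reading that back and clearing the low flag bits
 * lets the guest compute the BAR size as ~0xfffffff8 + 1 = 8 bytes.
 */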
306
307 static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
308 u16 offset, char *buf, u32 count)
309 {
310 u8 data = *buf;
311
312 /* Handle data written by guest */
313 switch (offset) {
314 case UART_TX:
315 /* if DLAB set, data is LSB of divisor */
316 if (mdev_state->s[index].dlab) {
317 mdev_state->s[index].divisor |= data;
318 break;
319 }
320
321 mutex_lock(&mdev_state->rxtx_lock);
322
323 /* save in TX buffer */
324 if (mdev_state->s[index].rxtx.count <
325 mdev_state->s[index].max_fifo_size) {
326 mdev_state->s[index].rxtx.fifo[
327 mdev_state->s[index].rxtx.head] = data;
328 mdev_state->s[index].rxtx.count++;
329 CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
330 mdev_state->s[index].overrun = false;
331
332 /*
333 * Trigger interrupt if receive data interrupt is
334 * enabled and fifo reached trigger level
335 */
336 if ((mdev_state->s[index].uart_reg[UART_IER] &
337 UART_IER_RDI) &&
338 (mdev_state->s[index].rxtx.count ==
339 mdev_state->s[index].intr_trigger_level)) {
340 /* trigger interrupt */
341 #if defined(DEBUG_INTR)
342 pr_err("Serial port %d: Fifo level trigger\n",
343 index);
344 #endif
345 mtty_trigger_interrupt(mdev_state->mdev->uuid);
346 }
347 } else {
348 #if defined(DEBUG_INTR)
349 pr_err("Serial port %d: Buffer Overflow\n", index);
350 #endif
351 mdev_state->s[index].overrun = true;
352
353 /*
354 * Trigger interrupt if receiver line status interrupt
355 * is enabled
356 */
357 if (mdev_state->s[index].uart_reg[UART_IER] &
358 UART_IER_RLSI)
359 mtty_trigger_interrupt(mdev_state->mdev->uuid);
360 }
361 mutex_unlock(&mdev_state->rxtx_lock);
362 break;
363
364 case UART_IER:
365 /* if DLAB set, data is MSB of divisor */
366 if (mdev_state->s[index].dlab)
367 mdev_state->s[index].divisor |= (u16)data << 8;
368 else {
369 mdev_state->s[index].uart_reg[offset] = data;
370 mutex_lock(&mdev_state->rxtx_lock);
371 if ((data & UART_IER_THRI) &&
372 (mdev_state->s[index].rxtx.head ==
373 mdev_state->s[index].rxtx.tail)) {
374 #if defined(DEBUG_INTR)
375 pr_err("Serial port %d: IER_THRI write\n",
376 index);
377 #endif
378 mtty_trigger_interrupt(mdev_state->mdev->uuid);
379 }
380
381 mutex_unlock(&mdev_state->rxtx_lock);
382 }
383
384 break;
385
386 case UART_FCR:
387 mdev_state->s[index].fcr = data;
388
389 mutex_lock(&mdev_state->rxtx_lock);
390 if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
391 /* clear loop back FIFO */
392 mdev_state->s[index].rxtx.count = 0;
393 mdev_state->s[index].rxtx.head = 0;
394 mdev_state->s[index].rxtx.tail = 0;
395 }
396 mutex_unlock(&mdev_state->rxtx_lock);
397
398 switch (data & UART_FCR_TRIGGER_MASK) {
399 case UART_FCR_TRIGGER_1:
400 mdev_state->s[index].intr_trigger_level = 1;
401 break;
402
403 case UART_FCR_TRIGGER_4:
404 mdev_state->s[index].intr_trigger_level = 4;
405 break;
406
407 case UART_FCR_TRIGGER_8:
408 mdev_state->s[index].intr_trigger_level = 8;
409 break;
410
411 case UART_FCR_TRIGGER_14:
412 mdev_state->s[index].intr_trigger_level = 14;
413 break;
414 }
415
416 /*
417 * Set trigger level to 1 otherwise, or implement a timer with a
418 * timeout of 4 characters and, when that timer expires, set the
419 * Receive data timeout bit in the IIR register.
420 */
421 mdev_state->s[index].intr_trigger_level = 1;
422 if (data & UART_FCR_ENABLE_FIFO)
423 mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
424 else {
425 mdev_state->s[index].max_fifo_size = 1;
426 mdev_state->s[index].intr_trigger_level = 1;
427 }
428
429 break;
430
431 case UART_LCR:
432 if (data & UART_LCR_DLAB) {
433 mdev_state->s[index].dlab = true;
434 mdev_state->s[index].divisor = 0;
435 } else
436 mdev_state->s[index].dlab = false;
437
438 mdev_state->s[index].uart_reg[offset] = data;
439 break;
440
441 case UART_MCR:
442 mdev_state->s[index].uart_reg[offset] = data;
443
444 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
445 (data & UART_MCR_OUT2)) {
446 #if defined(DEBUG_INTR)
447 pr_err("Serial port %d: MCR_OUT2 write\n", index);
448 #endif
449 mtty_trigger_interrupt(mdev_state->mdev->uuid);
450 }
451
452 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
453 (data & (UART_MCR_RTS | UART_MCR_DTR))) {
454 #if defined(DEBUG_INTR)
455 pr_err("Serial port %d: MCR RTS/DTR write\n", index);
456 #endif
457 mtty_trigger_interrupt(mdev_state->mdev->uuid);
458 }
459 break;
460
461 case UART_LSR:
462 case UART_MSR:
463 /* do nothing */
464 break;
465
466 case UART_SCR:
467 mdev_state->s[index].uart_reg[offset] = data;
468 break;
469
470 default:
471 break;
472 }
473 }
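/*
 * Note on the FIFO handling above: bytes written to UART_TX land in a
 * single circular buffer (rxtx.fifo[]) that handle_bar_read() later
 * drains through UART_RX, so each virtual port behaves like a serial
 * line looped back onto itself. CIRCULAR_BUF_INC_IDX() relies on
 * MAX_FIFO_SIZE being a power of two: with MAX_FIFO_SIZE == 16,
 * advancing an index of 15 gives (15 + 1) & 15 == 0, wrapping head and
 * tail without a branch.
 */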
474
475 static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
476 u16 offset, char *buf, u32 count)
477 {
478 /* Handle read requests by guest */
479 switch (offset) {
480 case UART_RX:
481 /* if DLAB set, data is LSB of divisor */
482 if (mdev_state->s[index].dlab) {
483 *buf = (u8)mdev_state->s[index].divisor;
484 break;
485 }
486
487 mutex_lock(&mdev_state->rxtx_lock);
488 /* return data in tx buffer */
489 if (mdev_state->s[index].rxtx.head !=
490 mdev_state->s[index].rxtx.tail) {
491 *buf = mdev_state->s[index].rxtx.fifo[
492 mdev_state->s[index].rxtx.tail];
493 mdev_state->s[index].rxtx.count--;
494 CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
495 }
496
497 if (mdev_state->s[index].rxtx.head ==
498 mdev_state->s[index].rxtx.tail) {
499 /*
500 * Trigger interrupt if tx buffer empty interrupt is
501 * enabled and fifo is empty
502 */
503 #if defined(DEBUG_INTR)
504 pr_err("Serial port %d: Buffer Empty\n", index);
505 #endif
506 if (mdev_state->s[index].uart_reg[UART_IER] &
507 UART_IER_THRI)
508 mtty_trigger_interrupt(mdev_state->mdev->uuid);
509 }
510 mutex_unlock(&mdev_state->rxtx_lock);
511
512 break;
513
514 case UART_IER:
515 if (mdev_state->s[index].dlab) {
516 *buf = (u8)(mdev_state->s[index].divisor >> 8);
517 break;
518 }
519 *buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
520 break;
521
522 case UART_IIR:
523 {
524 u8 ier = mdev_state->s[index].uart_reg[UART_IER];
525 *buf = 0;
526
527 mutex_lock(&mdev_state->rxtx_lock);
528 /* Interrupt priority 1: Parity, overrun, framing or break */
529 if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
530 *buf |= UART_IIR_RLSI;
531
532 /* Interrupt priority 2: Fifo trigger level reached */
533 if ((ier & UART_IER_RDI) &&
534 (mdev_state->s[index].rxtx.count ==
535 mdev_state->s[index].intr_trigger_level))
536 *buf |= UART_IIR_RDI;
537
538 /* Interrupt priority 3: transmitter holding register empty */
539 if ((ier & UART_IER_THRI) &&
540 (mdev_state->s[index].rxtx.head ==
541 mdev_state->s[index].rxtx.tail))
542 *buf |= UART_IIR_THRI;
543
544 /* Interrupt priority 4: Modem status: CTS, DSR, RI or DCD */
545 if ((ier & UART_IER_MSI) &&
546 (mdev_state->s[index].uart_reg[UART_MCR] &
547 (UART_MCR_RTS | UART_MCR_DTR)))
548 *buf |= UART_IIR_MSI;
549
550 /* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
551 if (*buf == 0)
552 *buf = UART_IIR_NO_INT;
553
554 /* set bit 6 & 7 to be 16550 compatible */
555 *buf |= 0xC0;
556 mutex_unlock(&mdev_state->rxtx_lock);
557 }
558 break;
559
560 case UART_LCR:
561 case UART_MCR:
562 *buf = mdev_state->s[index].uart_reg[offset];
563 break;
564
565 case UART_LSR:
566 {
567 u8 lsr = 0;
568
569 mutex_lock(&mdev_state->rxtx_lock);
570 /* at least one char in FIFO */
571 if (mdev_state->s[index].rxtx.head !=
572 mdev_state->s[index].rxtx.tail)
573 lsr |= UART_LSR_DR;
574
575 /* if FIFO overrun */
576 if (mdev_state->s[index].overrun)
577 lsr |= UART_LSR_OE;
578
579 /* transmit FIFO empty and transmitter empty */
580 if (mdev_state->s[index].rxtx.head ==
581 mdev_state->s[index].rxtx.tail)
582 lsr |= UART_LSR_TEMT | UART_LSR_THRE;
583
584 mutex_unlock(&mdev_state->rxtx_lock);
585 *buf = lsr;
586 break;
587 }
588 case UART_MSR:
589 *buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;
590
591 mutex_lock(&mdev_state->rxtx_lock);
592 /* if AFE is 1 and the FIFO has space, set CTS bit */
593 if (mdev_state->s[index].uart_reg[UART_MCR] &
594 UART_MCR_AFE) {
595 if (mdev_state->s[index].rxtx.count <
596 mdev_state->s[index].max_fifo_size)
597 *buf |= UART_MSR_CTS | UART_MSR_DCTS;
598 } else
599 *buf |= UART_MSR_CTS | UART_MSR_DCTS;
600 mutex_unlock(&mdev_state->rxtx_lock);
601
602 break;
603
604 case UART_SCR:
605 *buf = mdev_state->s[index].uart_reg[offset];
606 break;
607
608 default:
609 break;
610 }
611 }
612
613 static void mdev_read_base(struct mdev_state *mdev_state)
614 {
615 int index, pos;
616 u32 start_lo, start_hi;
617 u32 mem_type;
618
619 pos = PCI_BASE_ADDRESS_0;
620
621 for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {
622
623 if (!mdev_state->region_info[index].size)
624 continue;
625
626 start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
627 PCI_BASE_ADDRESS_MEM_MASK;
628 mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
629 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
630
631 switch (mem_type) {
632 case PCI_BASE_ADDRESS_MEM_TYPE_64:
633 start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
634 pos += 4;
635 break;
636 case PCI_BASE_ADDRESS_MEM_TYPE_32:
637 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
638 /* 1M mem BAR treated as 32-bit BAR */
639 default:
640 /* mem unknown type treated as 32-bit BAR */
641 start_hi = 0;
642 break;
643 }
644 pos += 4;
645 mdev_state->region_info[index].start = ((u64)start_hi << 32) |
646 start_lo;
647 }
648 }
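/*
 * mdev_read_base() reassembles each BAR's guest-programmed base address
 * from the virtual config space. A worked example with hypothetical
 * values: if the low dword at "pos" reads 0xfebd1008 and the BAR is a
 * 64-bit memory BAR whose high dword is 0x1, then
 *
 *	start_lo = 0xfebd1008 & PCI_BASE_ADDRESS_MEM_MASK = 0xfebd1000
 *	start    = ((u64)0x1 << 32) | 0xfebd1000 = 0x1febd1000
 *
 * 32-bit, 1M and unknown BAR types all fall back to start_hi == 0.
 */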
649
650 static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
651 loff_t pos, bool is_write)
652 {
653 struct mdev_state *mdev_state;
654 unsigned int index;
655 loff_t offset;
656 int ret = 0;
657
658 if (!mdev || !buf)
659 return -EINVAL;
660
661 mdev_state = mdev_get_drvdata(mdev);
662 if (!mdev_state) {
663 pr_err("%s mdev_state not found\n", __func__);
664 return -EINVAL;
665 }
666
667 mutex_lock(&mdev_state->ops_lock);
668
669 index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
670 offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
671 switch (index) {
672 case VFIO_PCI_CONFIG_REGION_INDEX:
673
674 #if defined(DEBUG)
675 pr_info("%s: PCI config space %s at offset 0x%llx\n",
676 __func__, is_write ? "write" : "read", offset);
677 #endif
678 if (is_write) {
679 dump_buffer(buf, count);
680 handle_pci_cfg_write(mdev_state, offset, buf, count);
681 } else {
682 memcpy(buf, (mdev_state->vconfig + offset), count);
683 dump_buffer(buf, count);
684 }
685
686 break;
687
688 case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
689 if (!mdev_state->region_info[index].start)
690 mdev_read_base(mdev_state);
691
692 if (is_write) {
693 dump_buffer(buf, count);
694
695 #if defined(DEBUG_REGS)
696 pr_info("%s: BAR%d WR @0x%llx %s val:0x%02x dlab:%d\n",
697 __func__, index, offset, wr_reg[offset],
698 (u8)*buf, mdev_state->s[index].dlab);
699 #endif
700 handle_bar_write(index, mdev_state, offset, buf, count);
701 } else {
702 handle_bar_read(index, mdev_state, offset, buf, count);
703 dump_buffer(buf, count);
704
705 #if defined(DEBUG_REGS)
706 pr_info("%s: BAR%d RD @0x%llx %s val:0x%02x dlab:%d\n",
707 __func__, index, offset, rd_reg[offset],
708 (u8)*buf, mdev_state->s[index].dlab);
709 #endif
710 }
711 break;
712
713 default:
714 ret = -1;
715 goto accessfailed;
716 }
717
718 ret = count;
719
720
721 accessfailed:
722 mutex_unlock(&mdev_state->ops_lock);
723
724 return ret;
725 }
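/*
 * mdev_access() demultiplexes accesses by the region index packed into
 * the upper bits of the file offset (MTTY_VFIO_PCI_OFFSET_SHIFT == 40).
 * For example, pos == ((u64)VFIO_PCI_BAR1_REGION_INDEX << 40) + 0x3
 * decodes to index == VFIO_PCI_BAR1_REGION_INDEX and offset == 0x3,
 * i.e. the UART_LCR register of the second virtual serial port.
 */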
726
727 int mtty_create(struct kobject *kobj, struct mdev_device *mdev)
728 {
729 struct mdev_state *mdev_state;
730 char name[MTTY_STRING_LEN];
731 int nr_ports = 0, i;
732
733 if (!mdev)
734 return -EINVAL;
735
736 for (i = 0; i < 2; i++) {
737 snprintf(name, MTTY_STRING_LEN, "%s-%d",
738 dev_driver_string(mdev->parent->dev), i + 1);
739 if (!strcmp(kobj->name, name)) {
740 nr_ports = i + 1;
741 break;
742 }
743 }
744
745 if (!nr_ports)
746 return -EINVAL;
747
748 mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
749 if (mdev_state == NULL)
750 return -ENOMEM;
751
752 mdev_state->nr_ports = nr_ports;
753 mdev_state->irq_index = -1;
754 mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
755 mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
756 mutex_init(&mdev_state->rxtx_lock);
757 mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);
758
759 if (mdev_state->vconfig == NULL) {
760 kfree(mdev_state);
761 return -ENOMEM;
762 }
763
764 mutex_init(&mdev_state->ops_lock);
765 mdev_state->mdev = mdev;
766 mdev_set_drvdata(mdev, mdev_state);
767
768 mtty_create_config_space(mdev_state);
769
770 mutex_lock(&mdev_list_lock);
771 list_add(&mdev_state->next, &mdev_devices_list);
772 mutex_unlock(&mdev_list_lock);
773
774 return 0;
775 }
776
777 int mtty_remove(struct mdev_device *mdev)
778 {
779 struct mdev_state *mds, *tmp_mds;
780 struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
781 int ret = -EINVAL;
782
783 mutex_lock(&mdev_list_lock);
784 list_for_each_entry_safe(mds, tmp_mds, &mdev_devices_list, next) {
785 if (mdev_state == mds) {
786 list_del(&mdev_state->next);
787 mdev_set_drvdata(mdev, NULL);
788 kfree(mdev_state->vconfig);
789 kfree(mdev_state);
790 ret = 0;
791 break;
792 }
793 }
794 mutex_unlock(&mdev_list_lock);
795
796 return ret;
797 }
798
799 int mtty_reset(struct mdev_device *mdev)
800 {
801 struct mdev_state *mdev_state;
802
803 if (!mdev)
804 return -EINVAL;
805
806 mdev_state = mdev_get_drvdata(mdev);
807 if (!mdev_state)
808 return -EINVAL;
809
810 pr_info("%s: called\n", __func__);
811
812 return 0;
813 }
814
815 ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count,
816 loff_t *ppos)
817 {
818 unsigned int done = 0;
819 int ret;
820
821 while (count) {
822 size_t filled;
823
824 if (count >= 4 && !(*ppos % 4)) {
825 u32 val;
826
827 ret = mdev_access(mdev, (char *)&val, sizeof(val),
828 *ppos, false);
829 if (ret <= 0)
830 goto read_err;
831
832 if (copy_to_user(buf, &val, sizeof(val)))
833 goto read_err;
834
835 filled = 4;
836 } else if (count >= 2 && !(*ppos % 2)) {
837 u16 val;
838
839 ret = mdev_access(mdev, (char *)&val, sizeof(val),
840 *ppos, false);
841 if (ret <= 0)
842 goto read_err;
843
844 if (copy_to_user(buf, &val, sizeof(val)))
845 goto read_err;
846
847 filled = 2;
848 } else {
849 u8 val;
850
851 ret = mdev_access(mdev, (char *)&val, sizeof(val),
852 *ppos, false);
853 if (ret <= 0)
854 goto read_err;
855
856 if (copy_to_user(buf, &val, sizeof(val)))
857 goto read_err;
858
859 filled = 1;
860 }
861
862 count -= filled;
863 done += filled;
864 *ppos += filled;
865 buf += filled;
866 }
867
868 return done;
869
870 read_err:
871 return -EFAULT;
872 }
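/*
 * mtty_read() splits a request into naturally aligned 4-, 2- and 1-byte
 * accesses: a 7-byte read starting at an offset that is a multiple of 4
 * is serviced as one 32-bit, one 16-bit and one 8-bit access.
 * mtty_write() below mirrors this, performing copy_from_user() before
 * the device access instead of copy_to_user() after it.
 */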
873
874 ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
875 size_t count, loff_t *ppos)
876 {
877 unsigned int done = 0;
878 int ret;
879
880 while (count) {
881 size_t filled;
882
883 if (count >= 4 && !(*ppos % 4)) {
884 u32 val;
885
886 if (copy_from_user(&val, buf, sizeof(val)))
887 goto write_err;
888
889 ret = mdev_access(mdev, (char *)&val, sizeof(val),
890 *ppos, true);
891 if (ret <= 0)
892 goto write_err;
893
894 filled = 4;
895 } else if (count >= 2 && !(*ppos % 2)) {
896 u16 val;
897
898 if (copy_from_user(&val, buf, sizeof(val)))
899 goto write_err;
900
901 ret = mdev_access(mdev, (char *)&val, sizeof(val),
902 *ppos, true);
903 if (ret <= 0)
904 goto write_err;
905
906 filled = 2;
907 } else {
908 u8 val;
909
910 if (copy_from_user(&val, buf, sizeof(val)))
911 goto write_err;
912
913 ret = mdev_access(mdev, (char *)&val, sizeof(val),
914 *ppos, true);
915 if (ret <= 0)
916 goto write_err;
917
918 filled = 1;
919 }
920 count -= filled;
921 done += filled;
922 *ppos += filled;
923 buf += filled;
924 }
925
926 return done;
927 write_err:
928 return -EFAULT;
929 }
930
931 static int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags,
932 unsigned int index, unsigned int start,
933 unsigned int count, void *data)
934 {
935 int ret = 0;
936 struct mdev_state *mdev_state;
937
938 if (!mdev)
939 return -EINVAL;
940
941 mdev_state = mdev_get_drvdata(mdev);
942 if (!mdev_state)
943 return -EINVAL;
944
945 mutex_lock(&mdev_state->ops_lock);
946 switch (index) {
947 case VFIO_PCI_INTX_IRQ_INDEX:
948 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
949 case VFIO_IRQ_SET_ACTION_MASK:
950 case VFIO_IRQ_SET_ACTION_UNMASK:
951 break;
952 case VFIO_IRQ_SET_ACTION_TRIGGER:
953 {
954 if (flags & VFIO_IRQ_SET_DATA_NONE) {
955 pr_info("%s: disable INTx\n", __func__);
956 if (mdev_state->intx_evtfd)
957 eventfd_ctx_put(mdev_state->intx_evtfd);
958 break;
959 }
960
961 if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
962 int fd = *(int *)data;
963
964 if (fd > 0) {
965 struct eventfd_ctx *evt;
966
967 evt = eventfd_ctx_fdget(fd);
968 if (IS_ERR(evt)) {
969 ret = PTR_ERR(evt);
970 break;
971 }
972 mdev_state->intx_evtfd = evt;
973 mdev_state->irq_fd = fd;
974 mdev_state->irq_index = index;
975 break;
976 }
977 }
978 break;
979 }
980 }
981 break;
982 case VFIO_PCI_MSI_IRQ_INDEX:
983 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
984 case VFIO_IRQ_SET_ACTION_MASK:
985 case VFIO_IRQ_SET_ACTION_UNMASK:
986 break;
987 case VFIO_IRQ_SET_ACTION_TRIGGER:
988 if (flags & VFIO_IRQ_SET_DATA_NONE) {
989 if (mdev_state->msi_evtfd)
990 eventfd_ctx_put(mdev_state->msi_evtfd);
991 pr_info("%s: disable MSI\n", __func__);
992 mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
993 break;
994 }
995 if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
996 int fd = *(int *)data;
997 struct eventfd_ctx *evt;
998
999 if (fd <= 0)
1000 break;
1001
1002 if (mdev_state->msi_evtfd)
1003 break;
1004
1005 evt = eventfd_ctx_fdget(fd);
1006 if (IS_ERR(evt)) {
1007 ret = PTR_ERR(evt);
1008 break;
1009 }
1010 mdev_state->msi_evtfd = evt;
1011 mdev_state->irq_fd = fd;
1012 mdev_state->irq_index = index;
1013 }
1014 break;
1015 }
1016 break;
1017 case VFIO_PCI_MSIX_IRQ_INDEX:
1018 pr_info("%s: MSIX_IRQ\n", __func__);
1019 break;
1020 case VFIO_PCI_ERR_IRQ_INDEX:
1021 pr_info("%s: ERR_IRQ\n", __func__);
1022 break;
1023 case VFIO_PCI_REQ_IRQ_INDEX:
1024 pr_info("%s: REQ_IRQ\n", __func__);
1025 break;
1026 }
1027
1028 mutex_unlock(&mdev_state->ops_lock);
1029 return ret;
1030 }
1031
1032 static int mtty_trigger_interrupt(uuid_le uuid)
1033 {
1034 int ret = -1;
1035 struct mdev_state *mdev_state;
1036
1037 mdev_state = find_mdev_state_by_uuid(uuid);
1038
1039 if (!mdev_state) {
1040 pr_info("%s: mdev not found\n", __func__);
1041 return -EINVAL;
1042 }
1043
1044 if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
1045 (!mdev_state->msi_evtfd))
1046 return -EINVAL;
1047 else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
1048 (!mdev_state->intx_evtfd)) {
1049 pr_info("%s: Intr eventfd not found\n", __func__);
1050 return -EINVAL;
1051 }
1052
1053 if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
1054 ret = eventfd_signal(mdev_state->msi_evtfd, 1);
1055 else
1056 ret = eventfd_signal(mdev_state->intx_evtfd, 1);
1057
1058 #if defined(DEBUG_INTR)
1059 pr_info("Intx triggered\n");
1060 #endif
1061 if (ret != 1)
1062 pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);
1063
1064 return ret;
1065 }
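/*
 * mtty_trigger_interrupt() raises whichever interrupt userspace has
 * configured through VFIO_DEVICE_SET_IRQS: the eventfd stored in
 * msi_evtfd (MSI) or intx_evtfd (INTx) is signalled with a count of 1,
 * and the VFIO consumer (e.g. QEMU) waiting on that file descriptor
 * injects the interrupt into the guest. eventfd_signal() returns the
 * amount actually added to the counter, so any value other than 1 is
 * reported as a failure.
 */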
1066
1067 int mtty_get_region_info(struct mdev_device *mdev,
1068 struct vfio_region_info *region_info,
1069 u16 *cap_type_id, void **cap_type)
1070 {
1071 unsigned int size = 0;
1072 struct mdev_state *mdev_state;
1073 int bar_index;
1074
1075 if (!mdev)
1076 return -EINVAL;
1077
1078 mdev_state = mdev_get_drvdata(mdev);
1079 if (!mdev_state)
1080 return -EINVAL;
1081
1082 mutex_lock(&mdev_state->ops_lock);
1083 bar_index = region_info->index;
1084
1085 switch (bar_index) {
1086 case VFIO_PCI_CONFIG_REGION_INDEX:
1087 size = MTTY_CONFIG_SPACE_SIZE;
1088 break;
1089 case VFIO_PCI_BAR0_REGION_INDEX:
1090 size = MTTY_IO_BAR_SIZE;
1091 break;
1092 case VFIO_PCI_BAR1_REGION_INDEX:
1093 if (mdev_state->nr_ports == 2)
1094 size = MTTY_IO_BAR_SIZE;
1095 break;
1096 default:
1097 size = 0;
1098 break;
1099 }
1100
1101 mdev_state->region_info[bar_index].size = size;
1102 mdev_state->region_info[bar_index].vfio_offset =
1103 MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
1104
1105 region_info->size = size;
1106 region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
1107 region_info->flags = VFIO_REGION_INFO_FLAG_READ |
1108 VFIO_REGION_INFO_FLAG_WRITE;
1109 mutex_unlock(&mdev_state->ops_lock);
1110 return 0;
1111 }
1112
1113 int mtty_get_irq_info(struct mdev_device *mdev, struct vfio_irq_info *irq_info)
1114 {
1115 switch (irq_info->index) {
1116 case VFIO_PCI_INTX_IRQ_INDEX:
1117 case VFIO_PCI_MSI_IRQ_INDEX:
1118 case VFIO_PCI_REQ_IRQ_INDEX:
1119 break;
1120
1121 default:
1122 return -EINVAL;
1123 }
1124
1125 irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
1126 irq_info->count = 1;
1127
1128 if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
1129 irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
1130 VFIO_IRQ_INFO_AUTOMASKED);
1131 else
1132 irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
1133
1134 return 0;
1135 }
1136
1137 int mtty_get_device_info(struct mdev_device *mdev,
1138 struct vfio_device_info *dev_info)
1139 {
1140 dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
1141 dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
1142 dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
1143
1144 return 0;
1145 }
1146
1147 static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
1148 unsigned long arg)
1149 {
1150 int ret = 0;
1151 unsigned long minsz;
1152 struct mdev_state *mdev_state;
1153
1154 if (!mdev)
1155 return -EINVAL;
1156
1157 mdev_state = mdev_get_drvdata(mdev);
1158 if (!mdev_state)
1159 return -ENODEV;
1160
1161 switch (cmd) {
1162 case VFIO_DEVICE_GET_INFO:
1163 {
1164 struct vfio_device_info info;
1165
1166 minsz = offsetofend(struct vfio_device_info, num_irqs);
1167
1168 if (copy_from_user(&info, (void __user *)arg, minsz))
1169 return -EFAULT;
1170
1171 if (info.argsz < minsz)
1172 return -EINVAL;
1173
1174 ret = mtty_get_device_info(mdev, &info);
1175 if (ret)
1176 return ret;
1177
1178 memcpy(&mdev_state->dev_info, &info, sizeof(info));
1179
1180 return copy_to_user((void __user *)arg, &info, minsz);
1181 }
1182 case VFIO_DEVICE_GET_REGION_INFO:
1183 {
1184 struct vfio_region_info info;
1185 u16 cap_type_id = 0;
1186 void *cap_type = NULL;
1187
1188 minsz = offsetofend(struct vfio_region_info, offset);
1189
1190 if (copy_from_user(&info, (void __user *)arg, minsz))
1191 return -EFAULT;
1192
1193 if (info.argsz < minsz)
1194 return -EINVAL;
1195
1196 ret = mtty_get_region_info(mdev, &info, &cap_type_id,
1197 &cap_type);
1198 if (ret)
1199 return ret;
1200
1201 return copy_to_user((void __user *)arg, &info, minsz);
1202 }
1203
1204 case VFIO_DEVICE_GET_IRQ_INFO:
1205 {
1206 struct vfio_irq_info info;
1207
1208 minsz = offsetofend(struct vfio_irq_info, count);
1209
1210 if (copy_from_user(&info, (void __user *)arg, minsz))
1211 return -EFAULT;
1212
1213 if ((info.argsz < minsz) ||
1214 (info.index >= mdev_state->dev_info.num_irqs))
1215 return -EINVAL;
1216
1217 ret = mtty_get_irq_info(mdev, &info);
1218 if (ret)
1219 return ret;
1220
1221 if (info.count == -1)
1222 return -EINVAL;
1223
1224 return copy_to_user((void __user *)arg, &info, minsz);
1225 }
1226 case VFIO_DEVICE_SET_IRQS:
1227 {
1228 struct vfio_irq_set hdr;
1229 u8 *data = NULL, *ptr = NULL;
1230 size_t data_size = 0;
1231
1232 minsz = offsetofend(struct vfio_irq_set, count);
1233
1234 if (copy_from_user(&hdr, (void __user *)arg, minsz))
1235 return -EFAULT;
1236
1237 ret = vfio_set_irqs_validate_and_prepare(&hdr,
1238 mdev_state->dev_info.num_irqs,
1239 VFIO_PCI_NUM_IRQS,
1240 &data_size);
1241 if (ret)
1242 return ret;
1243
1244 if (data_size) {
1245 ptr = data = memdup_user((void __user *)(arg + minsz),
1246 data_size);
1247 if (IS_ERR(data))
1248 return PTR_ERR(data);
1249 }
1250
1251 ret = mtty_set_irqs(mdev, hdr.flags, hdr.index, hdr.start,
1252 hdr.count, data);
1253
1254 kfree(ptr);
1255 return ret;
1256 }
1257 case VFIO_DEVICE_RESET:
1258 return mtty_reset(mdev);
1259 }
1260 return -ENOTTY;
1261 }
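/*
 * All of the ioctls above follow the usual VFIO convention: userspace
 * fills in argsz, the driver copies in only the fields up to minsz
 * (offsetofend() of the last mandatory member) and rejects
 * argsz < minsz. A minimal userspace sketch for querying BAR0
 * (illustrative only; "device_fd" is a hypothetical, already opened
 * VFIO device descriptor):
 *
 *	struct vfio_region_info info = {
 *		.argsz = sizeof(info),
 *		.index = VFIO_PCI_BAR0_REGION_INDEX,
 *	};
 *	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info);
 *
 * On success info.size is MTTY_IO_BAR_SIZE and info.offset is
 * MTTY_VFIO_PCI_INDEX_TO_OFFSET(info.index).
 */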
1262
1263 int mtty_open(struct mdev_device *mdev)
1264 {
1265 pr_info("%s\n", __func__);
1266 return 0;
1267 }
1268
1269 void mtty_close(struct mdev_device *mdev)
1270 {
1271 pr_info("%s\n", __func__);
1272 }
1273
1274 static ssize_t
1275 sample_mtty_dev_show(struct device *dev, struct device_attribute *attr,
1276 char *buf)
1277 {
1278 return sprintf(buf, "This is phy device\n");
1279 }
1280
1281 static DEVICE_ATTR_RO(sample_mtty_dev);
1282
1283 static struct attribute *mtty_dev_attrs[] = {
1284 &dev_attr_sample_mtty_dev.attr,
1285 NULL,
1286 };
1287
1288 static const struct attribute_group mtty_dev_group = {
1289 .name = "mtty_dev",
1290 .attrs = mtty_dev_attrs,
1291 };
1292
1293 const struct attribute_group *mtty_dev_groups[] = {
1294 &mtty_dev_group,
1295 NULL,
1296 };
1297
1298 static ssize_t
1299 sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
1300 char *buf)
1301 {
1302 struct mdev_device *mdev = to_mdev_device(dev);
1303
1304 if (mdev)
1305 return sprintf(buf, "This is MDEV %s\n", dev_name(&mdev->dev));
1306
1307 return sprintf(buf, "\n");
1308 }
1309
1310 static DEVICE_ATTR_RO(sample_mdev_dev);
1311
1312 static struct attribute *mdev_dev_attrs[] = {
1313 &dev_attr_sample_mdev_dev.attr,
1314 NULL,
1315 };
1316
1317 static const struct attribute_group mdev_dev_group = {
1318 .name = "vendor",
1319 .attrs = mdev_dev_attrs,
1320 };
1321
1322 const struct attribute_group *mdev_dev_groups[] = {
1323 &mdev_dev_group,
1324 NULL,
1325 };
1326
1327 static ssize_t
1328 name_show(struct kobject *kobj, struct device *dev, char *buf)
1329 {
1330 char name[MTTY_STRING_LEN];
1331 int i;
1332 const char *name_str[2] = {"Single port serial", "Dual port serial"};
1333
1334 for (i = 0; i < 2; i++) {
1335 snprintf(name, MTTY_STRING_LEN, "%s-%d",
1336 dev_driver_string(dev), i + 1);
1337 if (!strcmp(kobj->name, name))
1338 return sprintf(buf, "%s\n", name_str[i]);
1339 }
1340
1341 return -EINVAL;
1342 }
1343
1344 MDEV_TYPE_ATTR_RO(name);
1345
1346 static ssize_t
1347 available_instances_show(struct kobject *kobj, struct device *dev, char *buf)
1348 {
1349 char name[MTTY_STRING_LEN];
1350 int i;
1351 struct mdev_state *mds;
1352 int ports = 0, used = 0;
1353
1354 for (i = 0; i < 2; i++) {
1355 snprintf(name, MTTY_STRING_LEN, "%s-%d",
1356 dev_driver_string(dev), i + 1);
1357 if (!strcmp(kobj->name, name)) {
1358 ports = i + 1;
1359 break;
1360 }
1361 }
1362
1363 if (!ports)
1364 return -EINVAL;
1365
1366 list_for_each_entry(mds, &mdev_devices_list, next)
1367 used += mds->nr_ports;
1368
1369 return sprintf(buf, "%d\n", (MAX_MTTYS - used)/ports);
1370 }
1371
1372 MDEV_TYPE_ATTR_RO(available_instances);
1373
1374
1375 static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
1376 char *buf)
1377 {
1378 return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
1379 }
1380
1381 MDEV_TYPE_ATTR_RO(device_api);
1382
1383 static struct attribute *mdev_types_attrs[] = {
1384 &mdev_type_attr_name.attr,
1385 &mdev_type_attr_device_api.attr,
1386 &mdev_type_attr_available_instances.attr,
1387 NULL,
1388 };
1389
1390 static struct attribute_group mdev_type_group1 = {
1391 .name = "1",
1392 .attrs = mdev_types_attrs,
1393 };
1394
1395 static struct attribute_group mdev_type_group2 = {
1396 .name = "2",
1397 .attrs = mdev_types_attrs,
1398 };
1399
1400 struct attribute_group *mdev_type_groups[] = {
1401 &mdev_type_group1,
1402 &mdev_type_group2,
1403 NULL,
1404 };
1405
1406 struct parent_ops mdev_fops = {
1407 .owner = THIS_MODULE,
1408 .dev_attr_groups = mtty_dev_groups,
1409 .mdev_attr_groups = mdev_dev_groups,
1410 .supported_type_groups = mdev_type_groups,
1411 .create = mtty_create,
1412 .remove = mtty_remove,
1413 .open = mtty_open,
1414 .release = mtty_close,
1415 .read = mtty_read,
1416 .write = mtty_write,
1417 .ioctl = mtty_ioctl,
1418 };
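/*
 * mdev_fops is the parent_ops structure handed to mdev_register_device()
 * in mtty_dev_init() below; it wires the sysfs attribute groups and the
 * create/remove/open/release/read/write/ioctl callbacks defined above
 * into the mediated-device core. Once registered, an mtty instance is
 * typically created from userspace by writing a UUID into
 * mdev_supported_types/mtty-1/create (or mtty-2 for the dual-port type)
 * under the parent device's sysfs directory; the exact path is assumed
 * from MTTY_NAME and the "1"/"2" type group names above.
 */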
1419
1420 static void mtty_device_release(struct device *dev)
1421 {
1422 dev_dbg(dev, "mtty: released\n");
1423 }
1424
1425 static int __init mtty_dev_init(void)
1426 {
1427 int ret = 0;
1428
1429 pr_info("mtty_dev: %s\n", __func__);
1430
1431 memset(&mtty_dev, 0, sizeof(mtty_dev));
1432
1433 idr_init(&mtty_dev.vd_idr);
1434
1435 ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK, MTTY_NAME);
1436
1437 if (ret < 0) {
1438 pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
1439 return ret;
1440 }
1441
1442 cdev_init(&mtty_dev.vd_cdev, &vd_fops);
1443 cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK);
1444
1445 pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
1446
1447 mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);
1448
1449 if (IS_ERR(mtty_dev.vd_class)) {
1450 pr_err("Error: failed to register mtty_dev class\n");
1451 goto failed1;
1452 }
1453
1454 mtty_dev.dev.class = mtty_dev.vd_class;
1455 mtty_dev.dev.release = mtty_device_release;
1456 dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);
1457
1458 ret = device_register(&mtty_dev.dev);
1459 if (ret)
1460 goto failed2;
1461
1462 if (mdev_register_device(&mtty_dev.dev, &mdev_fops) != 0)
1463 goto failed3;
1464
1465 mutex_init(&mdev_list_lock);
1466 INIT_LIST_HEAD(&mdev_devices_list);
1467
1468 goto all_done;
1469
1470 failed3:
1471
1472 device_unregister(&mtty_dev.dev);
1473 failed2:
1474 class_destroy(mtty_dev.vd_class);
1475
1476 failed1:
1477 cdev_del(&mtty_dev.vd_cdev);
1478 unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
1479
1480 all_done:
1481 return ret;
1482 }
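/*
 * Error handling in mtty_dev_init() follows the usual goto-unwind
 * pattern: each failure label releases the resources acquired before
 * the failing step, falling through in reverse order of acquisition
 * (mdev registration -> device_register -> class_create -> cdev and
 * chrdev region). Note that cdev_add()'s return value is not checked,
 * and a failure of class_create() or mdev_register_device() leaves ret
 * at 0, so those errors are reported as success; the sample keeps its
 * error handling deliberately simple.
 */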
1483
1484 static void __exit mtty_dev_exit(void)
1485 {
1486 mtty_dev.dev.bus = NULL;
1487 mdev_unregister_device(&mtty_dev.dev);
1488
1489 device_unregister(&mtty_dev.dev);
1490 idr_destroy(&mtty_dev.vd_idr);
1491 cdev_del(&mtty_dev.vd_cdev);
1492 unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
1493 class_destroy(mtty_dev.vd_class);
1494 mtty_dev.vd_class = NULL;
1495 pr_info("mtty_dev: Unloaded!\n");
1496 }
1497
1498 module_init(mtty_dev_init)
1499 module_exit(mtty_dev_exit)
1500
1501 MODULE_LICENSE("GPL v2");
1502 MODULE_INFO(supported, "Test driver that simulate serial port over PCI");
1503 MODULE_VERSION(VERSION_STRING);
1504 MODULE_AUTHOR(DRIVER_AUTHOR);
1505
1506
1507
1508
1509
1510 /* LDV_COMMENT_BEGIN_MAIN */
1511 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
1512
1513 /*###########################################################################*/
1514
1515 /*############## Driver Environment Generator 0.2 output ####################*/
1516
1517 /*###########################################################################*/
1518
1519
1520
1521 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before it is unloaded. */
1522 void ldv_check_final_state(void);
1523
1524 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that a return value is correct. */
1525 void ldv_check_return_value(int res);
1526
1527 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that the return value of the probe() function is correct. */
1528 void ldv_check_return_value_probe(int res);
1529
1530 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
1531 void ldv_initialize(void);
1532
1533 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
1534 void ldv_handler_precall(void);
1535
1537 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
1537 int nondet_int(void);
1538
1539 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
1540 int LDV_IN_INTERRUPT;
1541
1542 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
1543 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
1544
1545
1546
1547 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
1548 /*============================= VARIABLE DECLARATION PART =============================*/
1549 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
1550 /* content: int mtty_create(struct kobject *kobj, struct mdev_device *mdev)*/
1551 /* LDV_COMMENT_BEGIN_PREP */
1552 #define VERSION_STRING "0.1"
1553 #define DRIVER_AUTHOR "NVIDIA Corporation"
1554 #define MTTY_CLASS_NAME "mtty"
1555 #define MTTY_NAME "mtty"
1556 #define MTTY_STRING_LEN 16
1557 #define MTTY_CONFIG_SPACE_SIZE 0xff
1558 #define MTTY_IO_BAR_SIZE 0x8
1559 #define MTTY_MMIO_BAR_SIZE 0x100000
1560 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1561 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1562 #define MAX_FIFO_SIZE 16
1563 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1564 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1565 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1566 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1567 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1568 #define MTTY_VFIO_PCI_OFFSET_MASK \
1569 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1570 #define MAX_MTTYS 24
1571 #if defined(DEBUG_REGS)
1572 #endif
1573 #if defined(DEBUG)
1574 #endif
1575 #if defined(DEBUG_INTR)
1576 #endif
1577 #if defined(DEBUG_INTR)
1578 #endif
1579 #if defined(DEBUG_INTR)
1580 #endif
1581 #if defined(DEBUG_INTR)
1582 #endif
1583 #if defined(DEBUG_INTR)
1584 #endif
1585 #if defined(DEBUG_INTR)
1586 #endif
1587 #if defined(DEBUG)
1588 #endif
1589 #if defined(DEBUG_REGS)
1590 #endif
1591 #if defined(DEBUG_REGS)
1592 #endif
1593 /* LDV_COMMENT_END_PREP */
1594 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_create" */
1595 struct kobject * var_group1;
1596 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_create" */
1597 struct mdev_device * var_group2;
1598 /* LDV_COMMENT_BEGIN_PREP */
1599 #if defined(DEBUG_INTR)
1600 #endif
1601 /* LDV_COMMENT_END_PREP */
1602 /* content: int mtty_remove(struct mdev_device *mdev)*/
1603 /* LDV_COMMENT_BEGIN_PREP */
1604 #define VERSION_STRING "0.1"
1605 #define DRIVER_AUTHOR "NVIDIA Corporation"
1606 #define MTTY_CLASS_NAME "mtty"
1607 #define MTTY_NAME "mtty"
1608 #define MTTY_STRING_LEN 16
1609 #define MTTY_CONFIG_SPACE_SIZE 0xff
1610 #define MTTY_IO_BAR_SIZE 0x8
1611 #define MTTY_MMIO_BAR_SIZE 0x100000
1612 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1613 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1614 #define MAX_FIFO_SIZE 16
1615 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1616 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1617 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1618 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1619 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1620 #define MTTY_VFIO_PCI_OFFSET_MASK \
1621 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1622 #define MAX_MTTYS 24
1623 #if defined(DEBUG_REGS)
1624 #endif
1625 #if defined(DEBUG)
1626 #endif
1627 #if defined(DEBUG_INTR)
1628 #endif
1629 #if defined(DEBUG_INTR)
1630 #endif
1631 #if defined(DEBUG_INTR)
1632 #endif
1633 #if defined(DEBUG_INTR)
1634 #endif
1635 #if defined(DEBUG_INTR)
1636 #endif
1637 #if defined(DEBUG_INTR)
1638 #endif
1639 #if defined(DEBUG)
1640 #endif
1641 #if defined(DEBUG_REGS)
1642 #endif
1643 #if defined(DEBUG_REGS)
1644 #endif
1645 /* LDV_COMMENT_END_PREP */
1646 /* LDV_COMMENT_BEGIN_PREP */
1647 #if defined(DEBUG_INTR)
1648 #endif
1649 /* LDV_COMMENT_END_PREP */
1650 /* content: int mtty_open(struct mdev_device *mdev)*/
1651 /* LDV_COMMENT_BEGIN_PREP */
1652 #define VERSION_STRING "0.1"
1653 #define DRIVER_AUTHOR "NVIDIA Corporation"
1654 #define MTTY_CLASS_NAME "mtty"
1655 #define MTTY_NAME "mtty"
1656 #define MTTY_STRING_LEN 16
1657 #define MTTY_CONFIG_SPACE_SIZE 0xff
1658 #define MTTY_IO_BAR_SIZE 0x8
1659 #define MTTY_MMIO_BAR_SIZE 0x100000
1660 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1661 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1662 #define MAX_FIFO_SIZE 16
1663 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1664 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1665 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1666 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1667 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1668 #define MTTY_VFIO_PCI_OFFSET_MASK \
1669 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1670 #define MAX_MTTYS 24
1671 #if defined(DEBUG_REGS)
1672 #endif
1673 #if defined(DEBUG)
1674 #endif
1675 #if defined(DEBUG_INTR)
1676 #endif
1677 #if defined(DEBUG_INTR)
1678 #endif
1679 #if defined(DEBUG_INTR)
1680 #endif
1681 #if defined(DEBUG_INTR)
1682 #endif
1683 #if defined(DEBUG_INTR)
1684 #endif
1685 #if defined(DEBUG_INTR)
1686 #endif
1687 #if defined(DEBUG)
1688 #endif
1689 #if defined(DEBUG_REGS)
1690 #endif
1691 #if defined(DEBUG_REGS)
1692 #endif
1693 #if defined(DEBUG_INTR)
1694 #endif
1695 /* LDV_COMMENT_END_PREP */
1696 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mtty_open" */
1697 int res_mtty_open_19;
1698 /* content: void mtty_close(struct mdev_device *mdev)*/
1699 /* LDV_COMMENT_BEGIN_PREP */
1700 #define VERSION_STRING "0.1"
1701 #define DRIVER_AUTHOR "NVIDIA Corporation"
1702 #define MTTY_CLASS_NAME "mtty"
1703 #define MTTY_NAME "mtty"
1704 #define MTTY_STRING_LEN 16
1705 #define MTTY_CONFIG_SPACE_SIZE 0xff
1706 #define MTTY_IO_BAR_SIZE 0x8
1707 #define MTTY_MMIO_BAR_SIZE 0x100000
1708 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1709 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1710 #define MAX_FIFO_SIZE 16
1711 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1712 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1713 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1714 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1715 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1716 #define MTTY_VFIO_PCI_OFFSET_MASK \
1717 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1718 #define MAX_MTTYS 24
1719 #if defined(DEBUG_REGS)
1720 #endif
1721 #if defined(DEBUG)
1722 #endif
1723 #if defined(DEBUG_INTR)
1724 #endif
1725 #if defined(DEBUG_INTR)
1726 #endif
1727 #if defined(DEBUG_INTR)
1728 #endif
1729 #if defined(DEBUG_INTR)
1730 #endif
1731 #if defined(DEBUG_INTR)
1732 #endif
1733 #if defined(DEBUG_INTR)
1734 #endif
1735 #if defined(DEBUG)
1736 #endif
1737 #if defined(DEBUG_REGS)
1738 #endif
1739 #if defined(DEBUG_REGS)
1740 #endif
1741 #if defined(DEBUG_INTR)
1742 #endif
1743 /* LDV_COMMENT_END_PREP */
1744 /* content: ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count, loff_t *ppos)*/
1745 /* LDV_COMMENT_BEGIN_PREP */
1746 #define VERSION_STRING "0.1"
1747 #define DRIVER_AUTHOR "NVIDIA Corporation"
1748 #define MTTY_CLASS_NAME "mtty"
1749 #define MTTY_NAME "mtty"
1750 #define MTTY_STRING_LEN 16
1751 #define MTTY_CONFIG_SPACE_SIZE 0xff
1752 #define MTTY_IO_BAR_SIZE 0x8
1753 #define MTTY_MMIO_BAR_SIZE 0x100000
1754 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1755 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1756 #define MAX_FIFO_SIZE 16
1757 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1758 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1759 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1760 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1761 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1762 #define MTTY_VFIO_PCI_OFFSET_MASK \
1763 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1764 #define MAX_MTTYS 24
1765 #if defined(DEBUG_REGS)
1766 #endif
1767 #if defined(DEBUG)
1768 #endif
1769 #if defined(DEBUG_INTR)
1770 #endif
1771 #if defined(DEBUG_INTR)
1772 #endif
1773 #if defined(DEBUG_INTR)
1774 #endif
1775 #if defined(DEBUG_INTR)
1776 #endif
1777 #if defined(DEBUG_INTR)
1778 #endif
1779 #if defined(DEBUG_INTR)
1780 #endif
1781 #if defined(DEBUG)
1782 #endif
1783 #if defined(DEBUG_REGS)
1784 #endif
1785 #if defined(DEBUG_REGS)
1786 #endif
1787 /* LDV_COMMENT_END_PREP */
1788 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_read" */
1789 char __user * var_mtty_read_11_p1;
1790 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_read" */
1791 size_t var_mtty_read_11_p2;
1792 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_read" */
1793 loff_t * var_mtty_read_11_p3;
1794 /* LDV_COMMENT_BEGIN_PREP */
1795 #if defined(DEBUG_INTR)
1796 #endif
1797 /* LDV_COMMENT_END_PREP */
1798 /* content: ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf, size_t count, loff_t *ppos)*/
1799 /* LDV_COMMENT_BEGIN_PREP */
1800 #define VERSION_STRING "0.1"
1801 #define DRIVER_AUTHOR "NVIDIA Corporation"
1802 #define MTTY_CLASS_NAME "mtty"
1803 #define MTTY_NAME "mtty"
1804 #define MTTY_STRING_LEN 16
1805 #define MTTY_CONFIG_SPACE_SIZE 0xff
1806 #define MTTY_IO_BAR_SIZE 0x8
1807 #define MTTY_MMIO_BAR_SIZE 0x100000
1808 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1809 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1810 #define MAX_FIFO_SIZE 16
1811 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1812 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1813 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1814 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1815 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1816 #define MTTY_VFIO_PCI_OFFSET_MASK \
1817 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1818 #define MAX_MTTYS 24
1819 #if defined(DEBUG_REGS)
1820 #endif
1821 #if defined(DEBUG)
1822 #endif
1823 #if defined(DEBUG_INTR)
1824 #endif
1825 #if defined(DEBUG_INTR)
1826 #endif
1827 #if defined(DEBUG_INTR)
1828 #endif
1829 #if defined(DEBUG_INTR)
1830 #endif
1831 #if defined(DEBUG_INTR)
1832 #endif
1833 #if defined(DEBUG_INTR)
1834 #endif
1835 #if defined(DEBUG)
1836 #endif
1837 #if defined(DEBUG_REGS)
1838 #endif
1839 #if defined(DEBUG_REGS)
1840 #endif
1841 /* LDV_COMMENT_END_PREP */
1842 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_write" */
1843 const char __user * var_mtty_write_12_p1;
1844 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_write" */
1845 size_t var_mtty_write_12_p2;
1846 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_write" */
1847 loff_t * var_mtty_write_12_p3;
1848 /* LDV_COMMENT_BEGIN_PREP */
1849 #if defined(DEBUG_INTR)
1850 #endif
1851 /* LDV_COMMENT_END_PREP */
1852 /* content: static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd, unsigned long arg)*/
1853 /* LDV_COMMENT_BEGIN_PREP */
1854 #define VERSION_STRING "0.1"
1855 #define DRIVER_AUTHOR "NVIDIA Corporation"
1856 #define MTTY_CLASS_NAME "mtty"
1857 #define MTTY_NAME "mtty"
1858 #define MTTY_STRING_LEN 16
1859 #define MTTY_CONFIG_SPACE_SIZE 0xff
1860 #define MTTY_IO_BAR_SIZE 0x8
1861 #define MTTY_MMIO_BAR_SIZE 0x100000
1862 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1863 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1864 #define MAX_FIFO_SIZE 16
1865 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1866 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1867 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1868 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1869 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1870 #define MTTY_VFIO_PCI_OFFSET_MASK \
1871 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1872 #define MAX_MTTYS 24
1873 #if defined(DEBUG_REGS)
1874 #endif
1875 #if defined(DEBUG)
1876 #endif
1877 #if defined(DEBUG_INTR)
1878 #endif
1879 #if defined(DEBUG_INTR)
1880 #endif
1881 #if defined(DEBUG_INTR)
1882 #endif
1883 #if defined(DEBUG_INTR)
1884 #endif
1885 #if defined(DEBUG_INTR)
1886 #endif
1887 #if defined(DEBUG_INTR)
1888 #endif
1889 #if defined(DEBUG)
1890 #endif
1891 #if defined(DEBUG_REGS)
1892 #endif
1893 #if defined(DEBUG_REGS)
1894 #endif
1895 #if defined(DEBUG_INTR)
1896 #endif
1897 /* LDV_COMMENT_END_PREP */
1898 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_ioctl" */
1899 unsigned int var_mtty_ioctl_18_p1;
1900 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_ioctl" */
1901 unsigned long var_mtty_ioctl_18_p2;
1902
1903
1904
1905
1906 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
1907 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
1908 /*============================= VARIABLE INITIALIZING PART =============================*/
1909 LDV_IN_INTERRUPT=1;
1910
1911
1912
1913
1914 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
1915 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
1916 /*============================= FUNCTION CALL SECTION =============================*/
1917 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
1918 ldv_initialize();
1919
1920 /** INIT: init_type: ST_MODULE_INIT **/
1921 /* content: static int __init mtty_dev_init(void)*/
1922 /* LDV_COMMENT_BEGIN_PREP */
1923 #define VERSION_STRING "0.1"
1924 #define DRIVER_AUTHOR "NVIDIA Corporation"
1925 #define MTTY_CLASS_NAME "mtty"
1926 #define MTTY_NAME "mtty"
1927 #define MTTY_STRING_LEN 16
1928 #define MTTY_CONFIG_SPACE_SIZE 0xff
1929 #define MTTY_IO_BAR_SIZE 0x8
1930 #define MTTY_MMIO_BAR_SIZE 0x100000
1931 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1932 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1933 #define MAX_FIFO_SIZE 16
1934 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1935 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1936 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1937 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1938 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1939 #define MTTY_VFIO_PCI_OFFSET_MASK \
1940 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1941 #define MAX_MTTYS 24
1942 #if defined(DEBUG_REGS)
1943 #endif
1944 #if defined(DEBUG)
1945 #endif
1946 #if defined(DEBUG_INTR)
1947 #endif
1948 #if defined(DEBUG_INTR)
1949 #endif
1950 #if defined(DEBUG_INTR)
1951 #endif
1952 #if defined(DEBUG_INTR)
1953 #endif
1954 #if defined(DEBUG_INTR)
1955 #endif
1956 #if defined(DEBUG_INTR)
1957 #endif
1958 #if defined(DEBUG)
1959 #endif
1960 #if defined(DEBUG_REGS)
1961 #endif
1962 #if defined(DEBUG_REGS)
1963 #endif
1964 #if defined(DEBUG_INTR)
1965 #endif
1966 /* LDV_COMMENT_END_PREP */
1967 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver init function after the driver is loaded into the kernel. This function is declared as "MODULE_INIT(function name)". */
1968 ldv_handler_precall();
1969 if(mtty_dev_init())
1970 goto ldv_final;
1971 int ldv_s_mdev_fops_parent_ops = 0;
1972
1973
1974
1975 while( nondet_int()
1976 || !(ldv_s_mdev_fops_parent_ops == 0)
1977 ) {
1978
1979 switch(nondet_int()) {
1980
1981 case 0: {
1982
1983 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
1984 if(ldv_s_mdev_fops_parent_ops==0) {
1985
1986 /* content: int mtty_open(struct mdev_device *mdev)*/
1987 /* LDV_COMMENT_BEGIN_PREP */
1988 #define VERSION_STRING "0.1"
1989 #define DRIVER_AUTHOR "NVIDIA Corporation"
1990 #define MTTY_CLASS_NAME "mtty"
1991 #define MTTY_NAME "mtty"
1992 #define MTTY_STRING_LEN 16
1993 #define MTTY_CONFIG_SPACE_SIZE 0xff
1994 #define MTTY_IO_BAR_SIZE 0x8
1995 #define MTTY_MMIO_BAR_SIZE 0x100000
1996 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1997 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1998 #define MAX_FIFO_SIZE 16
1999 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2000 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2001 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2002 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2003 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2004 #define MTTY_VFIO_PCI_OFFSET_MASK \
2005 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2006 #define MAX_MTTYS 24
2007 #if defined(DEBUG_REGS)
2008 #endif
2009 #if defined(DEBUG)
2010 #endif
2011 #if defined(DEBUG_INTR)
2012 #endif
2013 #if defined(DEBUG_INTR)
2014 #endif
2015 #if defined(DEBUG_INTR)
2016 #endif
2017 #if defined(DEBUG_INTR)
2018 #endif
2019 #if defined(DEBUG_INTR)
2020 #endif
2021 #if defined(DEBUG_INTR)
2022 #endif
2023 #if defined(DEBUG)
2024 #endif
2025 #if defined(DEBUG_REGS)
2026 #endif
2027 #if defined(DEBUG_REGS)
2028 #endif
2029 #if defined(DEBUG_INTR)
2030 #endif
2031 /* LDV_COMMENT_END_PREP */
2032 /* LDV_COMMENT_FUNCTION_CALL Function from field "open" from driver structure with callbacks "mdev_fops". Standard function test for correct return result. */
2033 ldv_handler_precall();
2034 res_mtty_open_19 = mtty_open( var_group2);
2035 ldv_check_return_value(res_mtty_open_19);
2036 if(res_mtty_open_19)
2037 goto ldv_module_exit;
2038 ldv_s_mdev_fops_parent_ops++;
2039
2040 }
2041
2042 }
2043
2044 break;
2045 case 1: {
2046
2047 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
2048 if(ldv_s_mdev_fops_parent_ops==1) {
2049
2050 /* content: void mtty_close(struct mdev_device *mdev)*/
2051 /* LDV_COMMENT_BEGIN_PREP */
2052 #define VERSION_STRING "0.1"
2053 #define DRIVER_AUTHOR "NVIDIA Corporation"
2054 #define MTTY_CLASS_NAME "mtty"
2055 #define MTTY_NAME "mtty"
2056 #define MTTY_STRING_LEN 16
2057 #define MTTY_CONFIG_SPACE_SIZE 0xff
2058 #define MTTY_IO_BAR_SIZE 0x8
2059 #define MTTY_MMIO_BAR_SIZE 0x100000
2060 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2061 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2062 #define MAX_FIFO_SIZE 16
2063 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2064 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2065 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2066 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2067 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2068 #define MTTY_VFIO_PCI_OFFSET_MASK \
2069 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2070 #define MAX_MTTYS 24
2071 #if defined(DEBUG_REGS)
2072 #endif
2073 #if defined(DEBUG)
2074 #endif
2075 #if defined(DEBUG_INTR)
2076 #endif
2077 #if defined(DEBUG_INTR)
2078 #endif
2079 #if defined(DEBUG_INTR)
2080 #endif
2081 #if defined(DEBUG_INTR)
2082 #endif
2083 #if defined(DEBUG_INTR)
2084 #endif
2085 #if defined(DEBUG_INTR)
2086 #endif
2087 #if defined(DEBUG)
2088 #endif
2089 #if defined(DEBUG_REGS)
2090 #endif
2091 #if defined(DEBUG_REGS)
2092 #endif
2093 #if defined(DEBUG_INTR)
2094 #endif
2095 /* LDV_COMMENT_END_PREP */
2096 /* LDV_COMMENT_FUNCTION_CALL Function from field "release" from driver structure with callbacks "mdev_fops" */
2097 ldv_handler_precall();
2098 mtty_close( var_group2);
2099 ldv_s_mdev_fops_parent_ops++;
2100
2101 }
2102
2103 }
2104
2105 break;
2106 case 2: {
2107
2108 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
2109 if(ldv_s_mdev_fops_parent_ops==2) {
2110
2111 /* content: int mtty_remove(struct mdev_device *mdev)*/
2112 /* LDV_COMMENT_BEGIN_PREP */
2113 #define VERSION_STRING "0.1"
2114 #define DRIVER_AUTHOR "NVIDIA Corporation"
2115 #define MTTY_CLASS_NAME "mtty"
2116 #define MTTY_NAME "mtty"
2117 #define MTTY_STRING_LEN 16
2118 #define MTTY_CONFIG_SPACE_SIZE 0xff
2119 #define MTTY_IO_BAR_SIZE 0x8
2120 #define MTTY_MMIO_BAR_SIZE 0x100000
2121 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2122 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2123 #define MAX_FIFO_SIZE 16
2124 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2125 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2126 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2127 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2128 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2129 #define MTTY_VFIO_PCI_OFFSET_MASK \
2130 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2131 #define MAX_MTTYS 24
2132 #if defined(DEBUG_REGS)
2133 #endif
2134 #if defined(DEBUG)
2135 #endif
2136 #if defined(DEBUG_INTR)
2137 #endif
2138 #if defined(DEBUG_INTR)
2139 #endif
2140 #if defined(DEBUG_INTR)
2141 #endif
2142 #if defined(DEBUG_INTR)
2143 #endif
2144 #if defined(DEBUG_INTR)
2145 #endif
2146 #if defined(DEBUG_INTR)
2147 #endif
2148 #if defined(DEBUG)
2149 #endif
2150 #if defined(DEBUG_REGS)
2151 #endif
2152 #if defined(DEBUG_REGS)
2153 #endif
2154 /* LDV_COMMENT_END_PREP */
2155 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "mdev_fops" */
2156 ldv_handler_precall();
2157 mtty_remove( var_group2);
2158 /* LDV_COMMENT_BEGIN_PREP */
2159 #if defined(DEBUG_INTR)
2160 #endif
2161 /* LDV_COMMENT_END_PREP */
2162 ldv_s_mdev_fops_parent_ops=0;
2163
2164 }
2165
2166 }
2167
2168 break;
2169 case 3: {
2170
2171 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
2172
2173
2174 /* content: int mtty_create(struct kobject *kobj, struct mdev_device *mdev)*/
2175 /* LDV_COMMENT_BEGIN_PREP */
2176 #define VERSION_STRING "0.1"
2177 #define DRIVER_AUTHOR "NVIDIA Corporation"
2178 #define MTTY_CLASS_NAME "mtty"
2179 #define MTTY_NAME "mtty"
2180 #define MTTY_STRING_LEN 16
2181 #define MTTY_CONFIG_SPACE_SIZE 0xff
2182 #define MTTY_IO_BAR_SIZE 0x8
2183 #define MTTY_MMIO_BAR_SIZE 0x100000
2184 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2185 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2186 #define MAX_FIFO_SIZE 16
2187 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2188 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2189 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2190 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2191 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2192 #define MTTY_VFIO_PCI_OFFSET_MASK \
2193 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2194 #define MAX_MTTYS 24
2195 #if defined(DEBUG_REGS)
2196 #endif
2197 #if defined(DEBUG)
2198 #endif
2199 #if defined(DEBUG_INTR)
2200 #endif
2201 #if defined(DEBUG_INTR)
2202 #endif
2203 #if defined(DEBUG_INTR)
2204 #endif
2205 #if defined(DEBUG_INTR)
2206 #endif
2207 #if defined(DEBUG_INTR)
2208 #endif
2209 #if defined(DEBUG_INTR)
2210 #endif
2211 #if defined(DEBUG)
2212 #endif
2213 #if defined(DEBUG_REGS)
2214 #endif
2215 #if defined(DEBUG_REGS)
2216 #endif
2217 /* LDV_COMMENT_END_PREP */
2218 /* LDV_COMMENT_FUNCTION_CALL Function from field "create" from driver structure with callbacks "mdev_fops" */
2219 ldv_handler_precall();
2220 mtty_create( var_group1, var_group2);
2221 /* LDV_COMMENT_BEGIN_PREP */
2222 #if defined(DEBUG_INTR)
2223 #endif
2224 /* LDV_COMMENT_END_PREP */
2225
2226
2227
2228
2229 }
2230
2231 break;
2232 case 4: {
2233
2234 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
2235
2236
2237 /* content: ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count, loff_t *ppos)*/
2238 /* LDV_COMMENT_BEGIN_PREP */
2239 #define VERSION_STRING "0.1"
2240 #define DRIVER_AUTHOR "NVIDIA Corporation"
2241 #define MTTY_CLASS_NAME "mtty"
2242 #define MTTY_NAME "mtty"
2243 #define MTTY_STRING_LEN 16
2244 #define MTTY_CONFIG_SPACE_SIZE 0xff
2245 #define MTTY_IO_BAR_SIZE 0x8
2246 #define MTTY_MMIO_BAR_SIZE 0x100000
2247 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2248 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2249 #define MAX_FIFO_SIZE 16
2250 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2251 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2252 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2253 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2254 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2255 #define MTTY_VFIO_PCI_OFFSET_MASK \
2256 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2257 #define MAX_MTTYS 24
2258 #if defined(DEBUG_REGS)
2259 #endif
2260 #if defined(DEBUG)
2261 #endif
2262 #if defined(DEBUG_INTR)
2263 #endif
2264 #if defined(DEBUG_INTR)
2265 #endif
2266 #if defined(DEBUG_INTR)
2267 #endif
2268 #if defined(DEBUG_INTR)
2269 #endif
2270 #if defined(DEBUG_INTR)
2271 #endif
2272 #if defined(DEBUG_INTR)
2273 #endif
2274 #if defined(DEBUG)
2275 #endif
2276 #if defined(DEBUG_REGS)
2277 #endif
2278 #if defined(DEBUG_REGS)
2279 #endif
2280 /* LDV_COMMENT_END_PREP */
2281 /* LDV_COMMENT_FUNCTION_CALL Function from field "read" from driver structure with callbacks "mdev_fops" */
2282 ldv_handler_precall();
2283 mtty_read( var_group2, var_mtty_read_11_p1, var_mtty_read_11_p2, var_mtty_read_11_p3);
2284 /* LDV_COMMENT_BEGIN_PREP */
2285 #if defined(DEBUG_INTR)
2286 #endif
2287 /* LDV_COMMENT_END_PREP */
2288
2289
2290
2291
2292 }
2293
2294 break;
2295 case 5: {
2296
2297 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
2298
2299
2300 /* content: ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf, size_t count, loff_t *ppos)*/
2301 /* LDV_COMMENT_BEGIN_PREP */
2302 #define VERSION_STRING "0.1"
2303 #define DRIVER_AUTHOR "NVIDIA Corporation"
2304 #define MTTY_CLASS_NAME "mtty"
2305 #define MTTY_NAME "mtty"
2306 #define MTTY_STRING_LEN 16
2307 #define MTTY_CONFIG_SPACE_SIZE 0xff
2308 #define MTTY_IO_BAR_SIZE 0x8
2309 #define MTTY_MMIO_BAR_SIZE 0x100000
2310 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2311 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2312 #define MAX_FIFO_SIZE 16
2313 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2314 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2315 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2316 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2317 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2318 #define MTTY_VFIO_PCI_OFFSET_MASK \
2319 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2320 #define MAX_MTTYS 24
2321 #if defined(DEBUG_REGS)
2322 #endif
2323 #if defined(DEBUG)
2324 #endif
2325 #if defined(DEBUG_INTR)
2326 #endif
2327 #if defined(DEBUG_INTR)
2328 #endif
2329 #if defined(DEBUG_INTR)
2330 #endif
2331 #if defined(DEBUG_INTR)
2332 #endif
2333 #if defined(DEBUG_INTR)
2334 #endif
2335 #if defined(DEBUG_INTR)
2336 #endif
2337 #if defined(DEBUG)
2338 #endif
2339 #if defined(DEBUG_REGS)
2340 #endif
2341 #if defined(DEBUG_REGS)
2342 #endif
2343 /* LDV_COMMENT_END_PREP */
2344 /* LDV_COMMENT_FUNCTION_CALL Function from field "write" from driver structure with callbacks "mdev_fops" */
2345 ldv_handler_precall();
2346 mtty_write( var_group2, var_mtty_write_12_p1, var_mtty_write_12_p2, var_mtty_write_12_p3);
2347 /* LDV_COMMENT_BEGIN_PREP */
2348 #if defined(DEBUG_INTR)
2349 #endif
2350 /* LDV_COMMENT_END_PREP */
2351
2352
2353
2354
2355 }
2356
2357 break;
2358 case 6: {
2359
2360 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
2361
2362
2363 /* content: static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd, unsigned long arg)*/
2364 /* LDV_COMMENT_BEGIN_PREP */
2365 #define VERSION_STRING "0.1"
2366 #define DRIVER_AUTHOR "NVIDIA Corporation"
2367 #define MTTY_CLASS_NAME "mtty"
2368 #define MTTY_NAME "mtty"
2369 #define MTTY_STRING_LEN 16
2370 #define MTTY_CONFIG_SPACE_SIZE 0xff
2371 #define MTTY_IO_BAR_SIZE 0x8
2372 #define MTTY_MMIO_BAR_SIZE 0x100000
2373 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2374 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2375 #define MAX_FIFO_SIZE 16
2376 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2377 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2378 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2379 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2380 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2381 #define MTTY_VFIO_PCI_OFFSET_MASK \
2382 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2383 #define MAX_MTTYS 24
2384 #if defined(DEBUG_REGS)
2385 #endif
2386 #if defined(DEBUG)
2387 #endif
2388 #if defined(DEBUG_INTR)
2389 #endif
2390 #if defined(DEBUG_INTR)
2391 #endif
2392 #if defined(DEBUG_INTR)
2393 #endif
2394 #if defined(DEBUG_INTR)
2395 #endif
2396 #if defined(DEBUG_INTR)
2397 #endif
2398 #if defined(DEBUG_INTR)
2399 #endif
2400 #if defined(DEBUG)
2401 #endif
2402 #if defined(DEBUG_REGS)
2403 #endif
2404 #if defined(DEBUG_REGS)
2405 #endif
2406 #if defined(DEBUG_INTR)
2407 #endif
2408 /* LDV_COMMENT_END_PREP */
2409 /* LDV_COMMENT_FUNCTION_CALL Function from field "ioctl" from driver structure with callbacks "mdev_fops" */
2410 ldv_handler_precall();
2411 mtty_ioctl( var_group2, var_mtty_ioctl_18_p1, var_mtty_ioctl_18_p2);
2412
2413
2414
2415
2416 }
2417
2418 break;
2419 default: break;
2420
2421 }
2422
2423 }
2424
2425 ldv_module_exit:
2426
2427 /** INIT: init_type: ST_MODULE_EXIT **/
2428 /* content: static void __exit mtty_dev_exit(void)*/
2429 /* LDV_COMMENT_BEGIN_PREP */
2430 #define VERSION_STRING "0.1"
2431 #define DRIVER_AUTHOR "NVIDIA Corporation"
2432 #define MTTY_CLASS_NAME "mtty"
2433 #define MTTY_NAME "mtty"
2434 #define MTTY_STRING_LEN 16
2435 #define MTTY_CONFIG_SPACE_SIZE 0xff
2436 #define MTTY_IO_BAR_SIZE 0x8
2437 #define MTTY_MMIO_BAR_SIZE 0x100000
2438 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2439 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2440 #define MAX_FIFO_SIZE 16
2441 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2442 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2443 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2444 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2445 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2446 #define MTTY_VFIO_PCI_OFFSET_MASK \
2447 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2448 #define MAX_MTTYS 24
2449 #if defined(DEBUG_REGS)
2450 #endif
2451 #if defined(DEBUG)
2452 #endif
2453 #if defined(DEBUG_INTR)
2454 #endif
2455 #if defined(DEBUG_INTR)
2456 #endif
2457 #if defined(DEBUG_INTR)
2458 #endif
2459 #if defined(DEBUG_INTR)
2460 #endif
2461 #if defined(DEBUG_INTR)
2462 #endif
2463 #if defined(DEBUG_INTR)
2464 #endif
2465 #if defined(DEBUG)
2466 #endif
2467 #if defined(DEBUG_REGS)
2468 #endif
2469 #if defined(DEBUG_REGS)
2470 #endif
2471 #if defined(DEBUG_INTR)
2472 #endif
2473 /* LDV_COMMENT_END_PREP */
2474 /* LDV_COMMENT_FUNCTION_CALL Kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */
2475 ldv_handler_precall();
2476 mtty_dev_exit();
2477
2478 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
2479 ldv_final: ldv_check_final_state();
2480
2481 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
2482 return;
2483
2484 }
2485 #endif
2486
2487 /* LDV_COMMENT_END_MAIN */ 1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8
9 // Provide model function prototypes before their usage.
10
11 void *ldv_create_class(void);
12 int ldv_register_class(void);
13 void ldv_unregister_class(void);
14
15 int ldv_register_chrdev(int major);
16 int ldv_register_chrdev_region(void);
17 void ldv_unregister_chrdev_region(void);
18
19 int ldv_register_usb_gadget(void);
20 void ldv_unregister_usb_gadget(void);
21 #line 1 "/work/ldvuser/ref_launch/work/current--X--samples--X--defaultlinux-4.10-rc1.tar.xz--X--106_1a--X--cpachecker/linux-4.10-rc1.tar.xz/csd_deg_dscv/23/dscv_tempdir/dscv/ri/106_1a/samples/vfio-mdev/mtty.c"
22
23 /*
24 * Mediated virtual PCI serial host device driver
25 *
26 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
27 * Author: Neo Jia <cjia@nvidia.com>
28 * Kirti Wankhede <kwankhede@nvidia.com>
29 *
30 * This program is free software; you can redistribute it and/or modify
31 * it under the terms of the GNU General Public License version 2 as
32 * published by the Free Software Foundation.
33 *
34 * Sample driver that creates an mdev device that simulates a serial port over a
35 * PCI card.
36 *
37 */
38
39 #include <linux/init.h>
40 #include <linux/module.h>
41 #include <linux/device.h>
42 #include <linux/kernel.h>
43 #include <linux/fs.h>
44 #include <linux/poll.h>
45 #include <linux/slab.h>
46 #include <linux/cdev.h>
47 #include <linux/sched.h>
48 #include <linux/wait.h>
49 #include <linux/uuid.h>
50 #include <linux/vfio.h>
51 #include <linux/iommu.h>
52 #include <linux/sysfs.h>
53 #include <linux/ctype.h>
54 #include <linux/file.h>
55 #include <linux/mdev.h>
56 #include <linux/pci.h>
57 #include <linux/serial.h>
58 #include <uapi/linux/serial_reg.h>
59 #include <linux/eventfd.h>
60 /*
61 * #defines
62 */
63
64 #define VERSION_STRING "0.1"
65 #define DRIVER_AUTHOR "NVIDIA Corporation"
66
67 #define MTTY_CLASS_NAME "mtty"
68
69 #define MTTY_NAME "mtty"
70
71 #define MTTY_STRING_LEN 16
72
73 #define MTTY_CONFIG_SPACE_SIZE 0xff
74 #define MTTY_IO_BAR_SIZE 0x8
75 #define MTTY_MMIO_BAR_SIZE 0x100000
76
77 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
78 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
79
80 #define MAX_FIFO_SIZE 16
81
82 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
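/*
 * The mask-based increment above assumes MAX_FIFO_SIZE is a power of
 * two; e.g. with MAX_FIFO_SIZE == 16 an index of 15 wraps to
 * (15 + 1) & 15 == 0.
 */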
83
84 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
85
86 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
87 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
88 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
89 #define MTTY_VFIO_PCI_OFFSET_MASK \
90 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
91 #define MAX_MTTYS 24
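/*
 * Quick illustration of the offset encoding above (follows directly
 * from the macro definitions): the region index lives in bits 40 and
 * up of the file position, the in-region offset in the low 40 bits.
 * Region index 1 therefore maps to offset (1ULL << 40) = 0x10000000000,
 * and a position of 0x10000000003 decodes back to index 1, offset 3.
 */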
92
93 /*
94 * Global Structures
95 */
96
97 struct mtty_dev {
98 dev_t vd_devt;
99 struct class *vd_class;
100 struct cdev vd_cdev;
101 struct idr vd_idr;
102 struct device dev;
103 } mtty_dev;
104
105 struct mdev_region_info {
106 u64 start;
107 u64 phys_start;
108 u32 size;
109 u64 vfio_offset;
110 };
111
112 #if defined(DEBUG_REGS)
113 const char *wr_reg[] = {
114 "TX",
115 "IER",
116 "FCR",
117 "LCR",
118 "MCR",
119 "LSR",
120 "MSR",
121 "SCR"
122 };
123
124 const char *rd_reg[] = {
125 "RX",
126 "IER",
127 "IIR",
128 "LCR",
129 "MCR",
130 "LSR",
131 "MSR",
132 "SCR"
133 };
134 #endif
135
136 /* loop back buffer */
137 struct rxtx {
138 u8 fifo[MAX_FIFO_SIZE];
139 u8 head, tail;
140 u8 count;
141 };
142
143 struct serial_port {
144 u8 uart_reg[8]; /* 8 registers */
145 struct rxtx rxtx; /* loop back buffer */
146 bool dlab;
147 bool overrun;
148 u16 divisor;
149 u8 fcr; /* FIFO control register */
150 u8 max_fifo_size;
151 u8 intr_trigger_level; /* interrupt trigger level */
152 };
153
154 /* State of each mdev device */
155 struct mdev_state {
156 int irq_fd;
157 struct eventfd_ctx *intx_evtfd;
158 struct eventfd_ctx *msi_evtfd;
159 int irq_index;
160 u8 *vconfig;
161 struct mutex ops_lock;
162 struct mdev_device *mdev;
163 struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
164 u32 bar_mask[VFIO_PCI_NUM_REGIONS];
165 struct list_head next;
166 struct serial_port s[2];
167 struct mutex rxtx_lock;
168 struct vfio_device_info dev_info;
169 int nr_ports;
170 };
171
172 struct mutex mdev_list_lock;
173 struct list_head mdev_devices_list;
174
175 static const struct file_operations vd_fops = {
176 .owner = THIS_MODULE,
177 };
178
179 /* function prototypes */
180
181 static int mtty_trigger_interrupt(uuid_le uuid);
182
183 /* Helper functions */
184 static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid)
185 {
186 struct mdev_state *mds;
187
188 list_for_each_entry(mds, &mdev_devices_list, next) {
189 if (uuid_le_cmp(mds->mdev->uuid, uuid) == 0)
190 return mds;
191 }
192
193 return NULL;
194 }
195
196 void dump_buffer(char *buf, uint32_t count)
197 {
198 #if defined(DEBUG)
199 int i;
200
201 pr_info("Buffer:\n");
202 for (i = 0; i < count; i++) {
203 pr_info("%2x ", *(buf + i));
204 if ((i + 1) % 16 == 0)
205 pr_info("\n");
206 }
207 #endif
208 }
209
210 static void mtty_create_config_space(struct mdev_state *mdev_state)
211 {
212 /* PCI dev ID */
213 STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);
214
215 /* Control: I/O+, Mem-, BusMaster- */
216 STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);
217
218 /* Status: capabilities list absent */
219 STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);
220
221 /* Rev ID */
222 mdev_state->vconfig[0x8] = 0x10;
223
224 /* programming interface class: 16550-compatible serial controller */
225 mdev_state->vconfig[0x9] = 0x02;
226
227 /* Sub class : 00 */
228 mdev_state->vconfig[0xa] = 0x00;
229
230 /* Base class : Simple Communication controllers */
231 mdev_state->vconfig[0xb] = 0x07;
232
233 /* base address registers */
234 /* BAR0: IO space */
235 STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
236 mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;
237
238 if (mdev_state->nr_ports == 2) {
239 /* BAR1: IO space */
240 STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
241 mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
242 }
243
244 /* Subsystem ID */
245 STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);
246
247 mdev_state->vconfig[0x34] = 0x00; /* Cap Ptr */
248 mdev_state->vconfig[0x3d] = 0x01; /* interrupt pin (INTA#) */
249
250 /* Vendor specific data */
251 mdev_state->vconfig[0x40] = 0x23;
252 mdev_state->vconfig[0x43] = 0x80;
253 mdev_state->vconfig[0x44] = 0x23;
254 mdev_state->vconfig[0x48] = 0x23;
255 mdev_state->vconfig[0x4c] = 0x23;
256
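/* The bytes written at 0x60-0x6e below spell "PCI Serial/UART" in ASCII. */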
257 mdev_state->vconfig[0x60] = 0x50;
258 mdev_state->vconfig[0x61] = 0x43;
259 mdev_state->vconfig[0x62] = 0x49;
260 mdev_state->vconfig[0x63] = 0x20;
261 mdev_state->vconfig[0x64] = 0x53;
262 mdev_state->vconfig[0x65] = 0x65;
263 mdev_state->vconfig[0x66] = 0x72;
264 mdev_state->vconfig[0x67] = 0x69;
265 mdev_state->vconfig[0x68] = 0x61;
266 mdev_state->vconfig[0x69] = 0x6c;
267 mdev_state->vconfig[0x6a] = 0x2f;
268 mdev_state->vconfig[0x6b] = 0x55;
269 mdev_state->vconfig[0x6c] = 0x41;
270 mdev_state->vconfig[0x6d] = 0x52;
271 mdev_state->vconfig[0x6e] = 0x54;
272 }
273
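/*
 * Emulate guest writes to PCI config space: a BAR sizing write of
 * 0xffffffff is answered with the BAR's size mask, the interrupt line
 * register is stored, and writes to read-only fields are ignored.
 */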
274 static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
275 char *buf, u32 count)
276 {
277 u32 cfg_addr, bar_mask, bar_index = 0;
278
279 switch (offset) {
280 case 0x04: /* device control */
281 case 0x06: /* device status */
282 /* do nothing */
283 break;
284 case 0x3c: /* interrupt line */
285 mdev_state->vconfig[0x3c] = buf[0];
286 break;
287 case 0x3d:
288 /*
289 * Interrupt Pin is hardwired to INTA.
290 * This field is write protected by hardware
291 */
292 break;
293 case 0x10: /* BAR0 */
294 case 0x14: /* BAR1 */
295 if (offset == 0x10)
296 bar_index = 0;
297 else if (offset == 0x14)
298 bar_index = 1;
299
300 if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
301 STORE_LE32(&mdev_state->vconfig[offset], 0);
302 break;
303 }
304
305 cfg_addr = *(u32 *)buf;
306 pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);
307
308 if (cfg_addr == 0xffffffff) {
309 bar_mask = mdev_state->bar_mask[bar_index];
310 cfg_addr = (cfg_addr & bar_mask);
311 }
312
313 cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
314 STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
315 break;
316 case 0x18: /* BAR2 */
317 case 0x1c: /* BAR3 */
318 case 0x20: /* BAR4 */
319 STORE_LE32(&mdev_state->vconfig[offset], 0);
320 break;
321 default:
322 pr_info("PCI config write @0x%x of %d bytes not handled\n",
323 offset, count);
324 break;
325 }
326 }
327
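/*
 * Emulate guest writes to the 16550 register block: TX data is queued
 * in the loop back FIFO and, depending on the IER/FCR/MCR state, an
 * interrupt is raised via mtty_trigger_interrupt().
 */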
328 static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
329 u16 offset, char *buf, u32 count)
330 {
331 u8 data = *buf;
332
333 /* Handle data written by guest */
334 switch (offset) {
335 case UART_TX:
336 /* if DLAB set, data is LSB of divisor */
337 if (mdev_state->s[index].dlab) {
338 mdev_state->s[index].divisor |= data;
339 break;
340 }
341
342 mutex_lock(&mdev_state->rxtx_lock);
343
344 /* save in TX buffer */
345 if (mdev_state->s[index].rxtx.count <
346 mdev_state->s[index].max_fifo_size) {
347 mdev_state->s[index].rxtx.fifo[
348 mdev_state->s[index].rxtx.head] = data;
349 mdev_state->s[index].rxtx.count++;
350 CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
351 mdev_state->s[index].overrun = false;
352
353 /*
354 * Trigger interrupt if receive data interrupt is
355 * enabled and fifo reached trigger level
356 */
357 if ((mdev_state->s[index].uart_reg[UART_IER] &
358 UART_IER_RDI) &&
359 (mdev_state->s[index].rxtx.count ==
360 mdev_state->s[index].intr_trigger_level)) {
361 /* trigger interrupt */
362 #if defined(DEBUG_INTR)
363 pr_err("Serial port %d: Fifo level trigger\n",
364 index);
365 #endif
366 mtty_trigger_interrupt(mdev_state->mdev->uuid);
367 }
368 } else {
369 #if defined(DEBUG_INTR)
370 pr_err("Serial port %d: Buffer Overflow\n", index);
371 #endif
372 mdev_state->s[index].overrun = true;
373
374 /*
375 * Trigger interrupt if receiver line status interrupt
376 * is enabled
377 */
378 if (mdev_state->s[index].uart_reg[UART_IER] &
379 UART_IER_RLSI)
380 mtty_trigger_interrupt(mdev_state->mdev->uuid);
381 }
382 mutex_unlock(&mdev_state->rxtx_lock);
383 break;
384
385 case UART_IER:
386 /* if DLAB set, data is MSB of divisor */
387 if (mdev_state->s[index].dlab)
388 mdev_state->s[index].divisor |= (u16)data << 8;
389 else {
390 mdev_state->s[index].uart_reg[offset] = data;
391 mutex_lock(&mdev_state->rxtx_lock);
392 if ((data & UART_IER_THRI) &&
393 (mdev_state->s[index].rxtx.head ==
394 mdev_state->s[index].rxtx.tail)) {
395 #if defined(DEBUG_INTR)
396 pr_err("Serial port %d: IER_THRI write\n",
397 index);
398 #endif
399 mtty_trigger_interrupt(mdev_state->mdev->uuid);
400 }
401
402 mutex_unlock(&mdev_state->rxtx_lock);
403 }
404
405 break;
406
407 case UART_FCR:
408 mdev_state->s[index].fcr = data;
409
410 mutex_lock(&mdev_state->rxtx_lock);
411 if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
412 /* clear loop back FIFO */
413 mdev_state->s[index].rxtx.count = 0;
414 mdev_state->s[index].rxtx.head = 0;
415 mdev_state->s[index].rxtx.tail = 0;
416 }
417 mutex_unlock(&mdev_state->rxtx_lock);
418
419 switch (data & UART_FCR_TRIGGER_MASK) {
420 case UART_FCR_TRIGGER_1:
421 mdev_state->s[index].intr_trigger_level = 1;
422 break;
423
424 case UART_FCR_TRIGGER_4:
425 mdev_state->s[index].intr_trigger_level = 4;
426 break;
427
428 case UART_FCR_TRIGGER_8:
429 mdev_state->s[index].intr_trigger_level = 8;
430 break;
431
432 case UART_FCR_TRIGGER_14:
433 mdev_state->s[index].intr_trigger_level = 14;
434 break;
435 }
436
437 /*
438 * Otherwise set the trigger level to 1, or implement a timer with a
439 * timeout of 4 characters and, when it expires, set the "Received
440 * data timeout" indication in the IIR register.
441 */
442 mdev_state->s[index].intr_trigger_level = 1;
443 if (data & UART_FCR_ENABLE_FIFO)
444 mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
445 else {
446 mdev_state->s[index].max_fifo_size = 1;
447 mdev_state->s[index].intr_trigger_level = 1;
448 }
449
450 break;
451
452 case UART_LCR:
453 if (data & UART_LCR_DLAB) {
454 mdev_state->s[index].dlab = true;
455 mdev_state->s[index].divisor = 0;
456 } else
457 mdev_state->s[index].dlab = false;
458
459 mdev_state->s[index].uart_reg[offset] = data;
460 break;
461
462 case UART_MCR:
463 mdev_state->s[index].uart_reg[offset] = data;
464
465 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
466 (data & UART_MCR_OUT2)) {
467 #if defined(DEBUG_INTR)
468 pr_err("Serial port %d: MCR_OUT2 write\n", index);
469 #endif
470 mtty_trigger_interrupt(mdev_state->mdev->uuid);
471 }
472
473 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
474 (data & (UART_MCR_RTS | UART_MCR_DTR))) {
475 #if defined(DEBUG_INTR)
476 pr_err("Serial port %d: MCR RTS/DTR write\n", index);
477 #endif
478 mtty_trigger_interrupt(mdev_state->mdev->uuid);
479 }
480 break;
481
482 case UART_LSR:
483 case UART_MSR:
484 /* do nothing */
485 break;
486
487 case UART_SCR:
488 mdev_state->s[index].uart_reg[offset] = data;
489 break;
490
491 default:
492 break;
493 }
494 }
495
496 static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
497 u16 offset, char *buf, u32 count)
498 {
499 /* Handle read requests by guest */
500 switch (offset) {
501 case UART_RX:
502 /* if DLAB set, data is LSB of divisor */
503 if (mdev_state->s[index].dlab) {
504 *buf = (u8)mdev_state->s[index].divisor;
505 break;
506 }
507
508 mutex_lock(&mdev_state->rxtx_lock);
509 /* return data in tx buffer */
510 if (mdev_state->s[index].rxtx.head !=
511 mdev_state->s[index].rxtx.tail) {
512 *buf = mdev_state->s[index].rxtx.fifo[
513 mdev_state->s[index].rxtx.tail];
514 mdev_state->s[index].rxtx.count--;
515 CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
516 }
517
518 if (mdev_state->s[index].rxtx.head ==
519 mdev_state->s[index].rxtx.tail) {
520 /*
521 * Trigger interrupt if tx buffer empty interrupt is
522 * enabled and fifo is empty
523 */
524 #if defined(DEBUG_INTR)
525 pr_err("Serial port %d: Buffer Empty\n", index);
526 #endif
527 if (mdev_state->s[index].uart_reg[UART_IER] &
528 UART_IER_THRI)
529 mtty_trigger_interrupt(mdev_state->mdev->uuid);
530 }
531 mutex_unlock(&mdev_state->rxtx_lock);
532
533 break;
534
535 case UART_IER:
536 if (mdev_state->s[index].dlab) {
537 *buf = (u8)(mdev_state->s[index].divisor >> 8);
538 break;
539 }
540 *buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
541 break;
542
543 case UART_IIR:
544 {
545 u8 ier = mdev_state->s[index].uart_reg[UART_IER];
546 *buf = 0;
547
548 mutex_lock(&mdev_state->rxtx_lock);
549 /* Interrupt priority 1: Parity, overrun, framing or break */
550 if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
551 *buf |= UART_IIR_RLSI;
552
553 /* Interrupt priority 2: Fifo trigger level reached */
554 if ((ier & UART_IER_RDI) &&
555 (mdev_state->s[index].rxtx.count ==
556 mdev_state->s[index].intr_trigger_level))
557 *buf |= UART_IIR_RDI;
558
559 /* Interrupt priority 3: transmitter holding register empty */
560 if ((ier & UART_IER_THRI) &&
561 (mdev_state->s[index].rxtx.head ==
562 mdev_state->s[index].rxtx.tail))
563 *buf |= UART_IIR_THRI;
564
565 /* Interrupt priority 4: Modem status: CTS, DSR, RI or DCD */
566 if ((ier & UART_IER_MSI) &&
567 (mdev_state->s[index].uart_reg[UART_MCR] &
568 (UART_MCR_RTS | UART_MCR_DTR)))
569 *buf |= UART_IIR_MSI;
570
571 /* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
572 if (*buf == 0)
573 *buf = UART_IIR_NO_INT;
574
575 /* set bit 6 & 7 to be 16550 compatible */
576 *buf |= 0xC0;
577 mutex_unlock(&mdev_state->rxtx_lock);
578 }
579 break;
580
581 case UART_LCR:
582 case UART_MCR:
583 *buf = mdev_state->s[index].uart_reg[offset];
584 break;
585
586 case UART_LSR:
587 {
588 u8 lsr = 0;
589
590 mutex_lock(&mdev_state->rxtx_lock);
591 /* at least one char in FIFO */
592 if (mdev_state->s[index].rxtx.head !=
593 mdev_state->s[index].rxtx.tail)
594 lsr |= UART_LSR_DR;
595
596 /* if FIFO overrun */
597 if (mdev_state->s[index].overrun)
598 lsr |= UART_LSR_OE;
599
600 /* transmit FIFO empty and transmitter empty */
601 if (mdev_state->s[index].rxtx.head ==
602 mdev_state->s[index].rxtx.tail)
603 lsr |= UART_LSR_TEMT | UART_LSR_THRE;
604
605 mutex_unlock(&mdev_state->rxtx_lock);
606 *buf = lsr;
607 break;
608 }
609 case UART_MSR:
610 *buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;
611
612 mutex_lock(&mdev_state->rxtx_lock);
613 /* if AFE is 1 and FIFO has space, set CTS bit */
614 if (mdev_state->s[index].uart_reg[UART_MCR] &
615 UART_MCR_AFE) {
616 if (mdev_state->s[index].rxtx.count <
617 mdev_state->s[index].max_fifo_size)
618 *buf |= UART_MSR_CTS | UART_MSR_DCTS;
619 } else
620 *buf |= UART_MSR_CTS | UART_MSR_DCTS;
621 mutex_unlock(&mdev_state->rxtx_lock);
622
623 break;
624
625 case UART_SCR:
626 *buf = mdev_state->s[index].uart_reg[offset];
627 break;
628
629 default:
630 break;
631 }
632 }
633
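/*
 * Recover the BAR base addresses programmed by the guest from the
 * virtual config space; a 64-bit memory BAR consumes two dwords.
 */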
634 static void mdev_read_base(struct mdev_state *mdev_state)
635 {
636 int index, pos;
637 u32 start_lo, start_hi;
638 u32 mem_type;
639
640 pos = PCI_BASE_ADDRESS_0;
641
642 for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {
643
644 if (!mdev_state->region_info[index].size)
645 continue;
646
647 start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
648 PCI_BASE_ADDRESS_MEM_MASK;
649 mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
650 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
651
652 switch (mem_type) {
653 case PCI_BASE_ADDRESS_MEM_TYPE_64:
654 start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
655 pos += 4;
656 break;
657 case PCI_BASE_ADDRESS_MEM_TYPE_32:
658 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
659 /* 1M mem BAR treated as 32-bit BAR */
660 default:
661 /* unknown mem type treated as 32-bit BAR */
662 start_hi = 0;
663 break;
664 }
665 pos += 4;
666 mdev_state->region_info[index].start = ((u64)start_hi << 32) |
667 start_lo;
668 }
669 }
670
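/*
 * Route a single access to the virtual PCI config space or to one of
 * the emulated BARs, based on the region index encoded in pos; returns
 * the number of bytes handled or a negative value on failure.
 */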
671 static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
672 loff_t pos, bool is_write)
673 {
674 struct mdev_state *mdev_state;
675 unsigned int index;
676 loff_t offset;
677 int ret = 0;
678
679 if (!mdev || !buf)
680 return -EINVAL;
681
682 mdev_state = mdev_get_drvdata(mdev);
683 if (!mdev_state) {
684 pr_err("%s mdev_state not found\n", __func__);
685 return -EINVAL;
686 }
687
688 mutex_lock(&mdev_state->ops_lock);
689
690 index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
691 offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
692 switch (index) {
693 case VFIO_PCI_CONFIG_REGION_INDEX:
694
695 #if defined(DEBUG)
696 pr_info("%s: PCI config space %s at offset 0x%llx\n",
697 __func__, is_write ? "write" : "read", offset);
698 #endif
699 if (is_write) {
700 dump_buffer(buf, count);
701 handle_pci_cfg_write(mdev_state, offset, buf, count);
702 } else {
703 memcpy(buf, (mdev_state->vconfig + offset), count);
704 dump_buffer(buf, count);
705 }
706
707 break;
708
709 case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
710 if (!mdev_state->region_info[index].start)
711 mdev_read_base(mdev_state);
712
713 if (is_write) {
714 dump_buffer(buf, count);
715
716 #if defined(DEBUG_REGS)
717 pr_info("%s: BAR%d WR @0x%llx %s val:0x%02x dlab:%d\n",
718 __func__, index, offset, wr_reg[offset],
719 (u8)*buf, mdev_state->s[index].dlab);
720 #endif
721 handle_bar_write(index, mdev_state, offset, buf, count);
722 } else {
723 handle_bar_read(index, mdev_state, offset, buf, count);
724 dump_buffer(buf, count);
725
726 #if defined(DEBUG_REGS)
727 pr_info("%s: BAR%d RD @0x%llx %s val:0x%02x dlab:%d\n",
728 __func__, index, offset, rd_reg[offset],
729 (u8)*buf, mdev_state->s[index].dlab);
730 #endif
731 }
732 break;
733
734 default:
735 ret = -1;
736 goto accessfailed;
737 }
738
739 ret = count;
740
741
742 accessfailed:
743 mutex_unlock(&mdev_state->ops_lock);
744
745 return ret;
746 }
747
748 int mtty_create(struct kobject *kobj, struct mdev_device *mdev)
749 {
750 struct mdev_state *mdev_state;
751 char name[MTTY_STRING_LEN];
752 int nr_ports = 0, i;
753
754 if (!mdev)
755 return -EINVAL;
756
757 for (i = 0; i < 2; i++) {
758 snprintf(name, MTTY_STRING_LEN, "%s-%d",
759 dev_driver_string(mdev->parent->dev), i + 1);
760 if (!strcmp(kobj->name, name)) {
761 nr_ports = i + 1;
762 break;
763 }
764 }
765
766 if (!nr_ports)
767 return -EINVAL;
768
769 mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
770 if (mdev_state == NULL)
771 return -ENOMEM;
772
773 mdev_state->nr_ports = nr_ports;
774 mdev_state->irq_index = -1;
775 mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
776 mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
777 mutex_init(&mdev_state->rxtx_lock);
778 mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);
779
780 if (mdev_state->vconfig == NULL) {
781 kfree(mdev_state);
782 return -ENOMEM;
783 }
784
785 mutex_init(&mdev_state->ops_lock);
786 mdev_state->mdev = mdev;
787 mdev_set_drvdata(mdev, mdev_state);
788
789 mtty_create_config_space(mdev_state);
790
791 mutex_lock(&mdev_list_lock);
792 list_add(&mdev_state->next, &mdev_devices_list);
793 mutex_unlock(&mdev_list_lock);
794
795 return 0;
796 }
797
798 int mtty_remove(struct mdev_device *mdev)
799 {
800 struct mdev_state *mds, *tmp_mds;
801 struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
802 int ret = -EINVAL;
803
804 mutex_lock(&mdev_list_lock);
805 list_for_each_entry_safe(mds, tmp_mds, &mdev_devices_list, next) {
806 if (mdev_state == mds) {
807 list_del(&mdev_state->next);
808 mdev_set_drvdata(mdev, NULL);
809 kfree(mdev_state->vconfig);
810 kfree(mdev_state);
811 ret = 0;
812 break;
813 }
814 }
815 mutex_unlock(&mdev_list_lock);
816
817 return ret;
818 }
819
820 int mtty_reset(struct mdev_device *mdev)
821 {
822 struct mdev_state *mdev_state;
823
824 if (!mdev)
825 return -EINVAL;
826
827 mdev_state = mdev_get_drvdata(mdev);
828 if (!mdev_state)
829 return -EINVAL;
830
831 pr_info("%s: called\n", __func__);
832
833 return 0;
834 }
835
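/*
 * Userspace reads and writes are split into naturally aligned 4-, 2-
 * or 1-byte accesses, each forwarded through mdev_access();
 * mtty_write() below mirrors this loop.
 */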
836 ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count,
837 loff_t *ppos)
838 {
839 unsigned int done = 0;
840 int ret;
841
842 while (count) {
843 size_t filled;
844
845 if (count >= 4 && !(*ppos % 4)) {
846 u32 val;
847
848 ret = mdev_access(mdev, (char *)&val, sizeof(val),
849 *ppos, false);
850 if (ret <= 0)
851 goto read_err;
852
853 if (copy_to_user(buf, &val, sizeof(val)))
854 goto read_err;
855
856 filled = 4;
857 } else if (count >= 2 && !(*ppos % 2)) {
858 u16 val;
859
860 ret = mdev_access(mdev, (char *)&val, sizeof(val),
861 *ppos, false);
862 if (ret <= 0)
863 goto read_err;
864
865 if (copy_to_user(buf, &val, sizeof(val)))
866 goto read_err;
867
868 filled = 2;
869 } else {
870 u8 val;
871
872 ret = mdev_access(mdev, (char *)&val, sizeof(val),
873 *ppos, false);
874 if (ret <= 0)
875 goto read_err;
876
877 if (copy_to_user(buf, &val, sizeof(val)))
878 goto read_err;
879
880 filled = 1;
881 }
882
883 count -= filled;
884 done += filled;
885 *ppos += filled;
886 buf += filled;
887 }
888
889 return done;
890
891 read_err:
892 return -EFAULT;
893 }
894
895 ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
896 size_t count, loff_t *ppos)
897 {
898 unsigned int done = 0;
899 int ret;
900
901 while (count) {
902 size_t filled;
903
904 if (count >= 4 && !(*ppos % 4)) {
905 u32 val;
906
907 if (copy_from_user(&val, buf, sizeof(val)))
908 goto write_err;
909
910 ret = mdev_access(mdev, (char *)&val, sizeof(val),
911 *ppos, true);
912 if (ret <= 0)
913 goto write_err;
914
915 filled = 4;
916 } else if (count >= 2 && !(*ppos % 2)) {
917 u16 val;
918
919 if (copy_from_user(&val, buf, sizeof(val)))
920 goto write_err;
921
922 ret = mdev_access(mdev, (char *)&val, sizeof(val),
923 *ppos, true);
924 if (ret <= 0)
925 goto write_err;
926
927 filled = 2;
928 } else {
929 u8 val;
930
931 if (copy_from_user(&val, buf, sizeof(val)))
932 goto write_err;
933
934 ret = mdev_access(mdev, (char *)&val, sizeof(val),
935 *ppos, true);
936 if (ret <= 0)
937 goto write_err;
938
939 filled = 1;
940 }
941 count -= filled;
942 done += filled;
943 *ppos += filled;
944 buf += filled;
945 }
946
947 return done;
948 write_err:
949 return -EFAULT;
950 }
951
952 static int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags,
953 unsigned int index, unsigned int start,
954 unsigned int count, void *data)
955 {
956 int ret = 0;
957 struct mdev_state *mdev_state;
958
959 if (!mdev)
960 return -EINVAL;
961
962 mdev_state = mdev_get_drvdata(mdev);
963 if (!mdev_state)
964 return -EINVAL;
965
966 mutex_lock(&mdev_state->ops_lock);
967 switch (index) {
968 case VFIO_PCI_INTX_IRQ_INDEX:
969 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
970 case VFIO_IRQ_SET_ACTION_MASK:
971 case VFIO_IRQ_SET_ACTION_UNMASK:
972 break;
973 case VFIO_IRQ_SET_ACTION_TRIGGER:
974 {
975 if (flags & VFIO_IRQ_SET_DATA_NONE) {
976 pr_info("%s: disable INTx\n", __func__);
977 if (mdev_state->intx_evtfd)
978 eventfd_ctx_put(mdev_state->intx_evtfd);
979 break;
980 }
981
982 if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
983 int fd = *(int *)data;
984
985 if (fd > 0) {
986 struct eventfd_ctx *evt;
987
988 evt = eventfd_ctx_fdget(fd);
989 if (IS_ERR(evt)) {
990 ret = PTR_ERR(evt);
991 break;
992 }
993 mdev_state->intx_evtfd = evt;
994 mdev_state->irq_fd = fd;
995 mdev_state->irq_index = index;
996 break;
997 }
998 }
999 break;
1000 }
1001 }
1002 break;
1003 case VFIO_PCI_MSI_IRQ_INDEX:
1004 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
1005 case VFIO_IRQ_SET_ACTION_MASK:
1006 case VFIO_IRQ_SET_ACTION_UNMASK:
1007 break;
1008 case VFIO_IRQ_SET_ACTION_TRIGGER:
1009 if (flags & VFIO_IRQ_SET_DATA_NONE) {
1010 if (mdev_state->msi_evtfd)
1011 eventfd_ctx_put(mdev_state->msi_evtfd);
1012 pr_info("%s: disable MSI\n", __func__);
1013 mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
1014 break;
1015 }
1016 if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
1017 int fd = *(int *)data;
1018 struct eventfd_ctx *evt;
1019
1020 if (fd <= 0)
1021 break;
1022
1023 if (mdev_state->msi_evtfd)
1024 break;
1025
1026 evt = eventfd_ctx_fdget(fd);
1027 if (IS_ERR(evt)) {
1028 ret = PTR_ERR(evt);
1029 break;
1030 }
1031 mdev_state->msi_evtfd = evt;
1032 mdev_state->irq_fd = fd;
1033 mdev_state->irq_index = index;
1034 }
1035 break;
1036 }
1037 break;
1038 case VFIO_PCI_MSIX_IRQ_INDEX:
1039 pr_info("%s: MSIX_IRQ\n", __func__);
1040 break;
1041 case VFIO_PCI_ERR_IRQ_INDEX:
1042 pr_info("%s: ERR_IRQ\n", __func__);
1043 break;
1044 case VFIO_PCI_REQ_IRQ_INDEX:
1045 pr_info("%s: REQ_IRQ\n", __func__);
1046 break;
1047 }
1048
1049 mutex_unlock(&mdev_state->ops_lock);
1050 return ret;
1051 }
1052
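/*
 * Signal the eventfd registered for the currently selected interrupt:
 * MSI if configured, otherwise INTx.
 */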
1053 static int mtty_trigger_interrupt(uuid_le uuid)
1054 {
1055 int ret = -1;
1056 struct mdev_state *mdev_state;
1057
1058 mdev_state = find_mdev_state_by_uuid(uuid);
1059
1060 if (!mdev_state) {
1061 pr_info("%s: mdev not found\n", __func__);
1062 return -EINVAL;
1063 }
1064
1065 if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
1066 (!mdev_state->msi_evtfd))
1067 return -EINVAL;
1068 else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
1069 (!mdev_state->intx_evtfd)) {
1070 pr_info("%s: Intr eventfd not found\n", __func__);
1071 return -EINVAL;
1072 }
1073
1074 if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
1075 ret = eventfd_signal(mdev_state->msi_evtfd, 1);
1076 else
1077 ret = eventfd_signal(mdev_state->intx_evtfd, 1);
1078
1079 #if defined(DEBUG_INTR)
1080 pr_info("Intx triggered\n");
1081 #endif
1082 if (ret != 1)
1083 pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);
1084
1085 return ret;
1086 }
1087
1088 int mtty_get_region_info(struct mdev_device *mdev,
1089 struct vfio_region_info *region_info,
1090 u16 *cap_type_id, void **cap_type)
1091 {
1092 unsigned int size = 0;
1093 struct mdev_state *mdev_state;
1094 int bar_index;
1095
1096 if (!mdev)
1097 return -EINVAL;
1098
1099 mdev_state = mdev_get_drvdata(mdev);
1100 if (!mdev_state)
1101 return -EINVAL;
1102
1103 mutex_lock(&mdev_state->ops_lock);
1104 bar_index = region_info->index;
1105
1106 switch (bar_index) {
1107 case VFIO_PCI_CONFIG_REGION_INDEX:
1108 size = MTTY_CONFIG_SPACE_SIZE;
1109 break;
1110 case VFIO_PCI_BAR0_REGION_INDEX:
1111 size = MTTY_IO_BAR_SIZE;
1112 break;
1113 case VFIO_PCI_BAR1_REGION_INDEX:
1114 if (mdev_state->nr_ports == 2)
1115 size = MTTY_IO_BAR_SIZE;
1116 break;
1117 default:
1118 size = 0;
1119 break;
1120 }
1121
1122 mdev_state->region_info[bar_index].size = size;
1123 mdev_state->region_info[bar_index].vfio_offset =
1124 MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
1125
1126 region_info->size = size;
1127 region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
1128 region_info->flags = VFIO_REGION_INFO_FLAG_READ |
1129 VFIO_REGION_INFO_FLAG_WRITE;
1130 mutex_unlock(&mdev_state->ops_lock);
1131 return 0;
1132 }
1133
1134 int mtty_get_irq_info(struct mdev_device *mdev, struct vfio_irq_info *irq_info)
1135 {
1136 switch (irq_info->index) {
1137 case VFIO_PCI_INTX_IRQ_INDEX:
1138 case VFIO_PCI_MSI_IRQ_INDEX:
1139 case VFIO_PCI_REQ_IRQ_INDEX:
1140 break;
1141
1142 default:
1143 return -EINVAL;
1144 }
1145
1146 irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
1147 irq_info->count = 1;
1148
1149 if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
1150 irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
1151 VFIO_IRQ_INFO_AUTOMASKED);
1152 else
1153 irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
1154
1155 return 0;
1156 }
1157
1158 int mtty_get_device_info(struct mdev_device *mdev,
1159 struct vfio_device_info *dev_info)
1160 {
1161 dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
1162 dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
1163 dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
1164
1165 return 0;
1166 }
1167
1168 static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
1169 unsigned long arg)
1170 {
1171 int ret = 0;
1172 unsigned long minsz;
1173 struct mdev_state *mdev_state;
1174
1175 if (!mdev)
1176 return -EINVAL;
1177
1178 mdev_state = mdev_get_drvdata(mdev);
1179 if (!mdev_state)
1180 return -ENODEV;
1181
1182 switch (cmd) {
1183 case VFIO_DEVICE_GET_INFO:
1184 {
1185 struct vfio_device_info info;
1186
1187 minsz = offsetofend(struct vfio_device_info, num_irqs);
1188
1189 if (copy_from_user(&info, (void __user *)arg, minsz))
1190 return -EFAULT;
1191
1192 if (info.argsz < minsz)
1193 return -EINVAL;
1194
1195 ret = mtty_get_device_info(mdev, &info);
1196 if (ret)
1197 return ret;
1198
1199 memcpy(&mdev_state->dev_info, &info, sizeof(info));
1200
1201 return copy_to_user((void __user *)arg, &info, minsz);
1202 }
1203 case VFIO_DEVICE_GET_REGION_INFO:
1204 {
1205 struct vfio_region_info info;
1206 u16 cap_type_id = 0;
1207 void *cap_type = NULL;
1208
1209 minsz = offsetofend(struct vfio_region_info, offset);
1210
1211 if (copy_from_user(&info, (void __user *)arg, minsz))
1212 return -EFAULT;
1213
1214 if (info.argsz < minsz)
1215 return -EINVAL;
1216
1217 ret = mtty_get_region_info(mdev, &info, &cap_type_id,
1218 &cap_type);
1219 if (ret)
1220 return ret;
1221
1222 return copy_to_user((void __user *)arg, &info, minsz);
1223 }
1224
1225 case VFIO_DEVICE_GET_IRQ_INFO:
1226 {
1227 struct vfio_irq_info info;
1228
1229 minsz = offsetofend(struct vfio_irq_info, count);
1230
1231 if (copy_from_user(&info, (void __user *)arg, minsz))
1232 return -EFAULT;
1233
1234 if ((info.argsz < minsz) ||
1235 (info.index >= mdev_state->dev_info.num_irqs))
1236 return -EINVAL;
1237
1238 ret = mtty_get_irq_info(mdev, &info);
1239 if (ret)
1240 return ret;
1241
1242 if (info.count == -1)
1243 return -EINVAL;
1244
1245 return copy_to_user((void __user *)arg, &info, minsz);
1246 }
1247 case VFIO_DEVICE_SET_IRQS:
1248 {
1249 struct vfio_irq_set hdr;
1250 u8 *data = NULL, *ptr = NULL;
1251 size_t data_size = 0;
1252
1253 minsz = offsetofend(struct vfio_irq_set, count);
1254
1255 if (copy_from_user(&hdr, (void __user *)arg, minsz))
1256 return -EFAULT;
1257
1258 ret = vfio_set_irqs_validate_and_prepare(&hdr,
1259 mdev_state->dev_info.num_irqs,
1260 VFIO_PCI_NUM_IRQS,
1261 &data_size);
1262 if (ret)
1263 return ret;
1264
1265 if (data_size) {
1266 ptr = data = memdup_user((void __user *)(arg + minsz),
1267 data_size);
1268 if (IS_ERR(data))
1269 return PTR_ERR(data);
1270 }
1271
1272 ret = mtty_set_irqs(mdev, hdr.flags, hdr.index, hdr.start,
1273 hdr.count, data);
1274
1275 kfree(ptr);
1276 return ret;
1277 }
1278 case VFIO_DEVICE_RESET:
1279 return mtty_reset(mdev);
1280 }
1281 return -ENOTTY;
1282 }
1283
1284 int mtty_open(struct mdev_device *mdev)
1285 {
1286 pr_info("%s\n", __func__);
1287 return 0;
1288 }
1289
1290 void mtty_close(struct mdev_device *mdev)
1291 {
1292 pr_info("%s\n", __func__);
1293 }
1294
1295 static ssize_t
1296 sample_mtty_dev_show(struct device *dev, struct device_attribute *attr,
1297 char *buf)
1298 {
1299 return sprintf(buf, "This is phy device\n");
1300 }
1301
1302 static DEVICE_ATTR_RO(sample_mtty_dev);
1303
1304 static struct attribute *mtty_dev_attrs[] = {
1305 &dev_attr_sample_mtty_dev.attr,
1306 NULL,
1307 };
1308
1309 static const struct attribute_group mtty_dev_group = {
1310 .name = "mtty_dev",
1311 .attrs = mtty_dev_attrs,
1312 };
1313
1314 const struct attribute_group *mtty_dev_groups[] = {
1315 &mtty_dev_group,
1316 NULL,
1317 };
1318
1319 static ssize_t
1320 sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
1321 char *buf)
1322 {
1323 struct mdev_device *mdev = to_mdev_device(dev);
1324
1325 if (mdev)
1326 return sprintf(buf, "This is MDEV %s\n", dev_name(&mdev->dev));
1327
1328 return sprintf(buf, "\n");
1329 }
1330
1331 static DEVICE_ATTR_RO(sample_mdev_dev);
1332
1333 static struct attribute *mdev_dev_attrs[] = {
1334 &dev_attr_sample_mdev_dev.attr,
1335 NULL,
1336 };
1337
1338 static const struct attribute_group mdev_dev_group = {
1339 .name = "vendor",
1340 .attrs = mdev_dev_attrs,
1341 };
1342
1343 const struct attribute_group *mdev_dev_groups[] = {
1344 &mdev_dev_group,
1345 NULL,
1346 };
1347
1348 static ssize_t
1349 name_show(struct kobject *kobj, struct device *dev, char *buf)
1350 {
1351 char name[MTTY_STRING_LEN];
1352 int i;
1353 const char *name_str[2] = {"Single port serial", "Dual port serial"};
1354
1355 for (i = 0; i < 2; i++) {
1356 snprintf(name, MTTY_STRING_LEN, "%s-%d",
1357 dev_driver_string(dev), i + 1);
1358 if (!strcmp(kobj->name, name))
1359 return sprintf(buf, "%s\n", name_str[i]);
1360 }
1361
1362 return -EINVAL;
1363 }
1364
1365 MDEV_TYPE_ATTR_RO(name);
1366
1367 static ssize_t
1368 available_instances_show(struct kobject *kobj, struct device *dev, char *buf)
1369 {
1370 char name[MTTY_STRING_LEN];
1371 int i;
1372 struct mdev_state *mds;
1373 int ports = 0, used = 0;
1374
1375 for (i = 0; i < 2; i++) {
1376 snprintf(name, MTTY_STRING_LEN, "%s-%d",
1377 dev_driver_string(dev), i + 1);
1378 if (!strcmp(kobj->name, name)) {
1379 ports = i + 1;
1380 break;
1381 }
1382 }
1383
1384 if (!ports)
1385 return -EINVAL;
1386
1387 list_for_each_entry(mds, &mdev_devices_list, next)
1388 used += mds->nr_ports;
1389
1390 return sprintf(buf, "%d\n", (MAX_MTTYS - used)/ports);
1391 }
1392
1393 MDEV_TYPE_ATTR_RO(available_instances);
1394
1395
1396 static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
1397 char *buf)
1398 {
1399 return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
1400 }
1401
1402 MDEV_TYPE_ATTR_RO(device_api);
1403
1404 static struct attribute *mdev_types_attrs[] = {
1405 &mdev_type_attr_name.attr,
1406 &mdev_type_attr_device_api.attr,
1407 &mdev_type_attr_available_instances.attr,
1408 NULL,
1409 };
1410
1411 static struct attribute_group mdev_type_group1 = {
1412 .name = "1",
1413 .attrs = mdev_types_attrs,
1414 };
1415
1416 static struct attribute_group mdev_type_group2 = {
1417 .name = "2",
1418 .attrs = mdev_types_attrs,
1419 };
1420
1421 struct attribute_group *mdev_type_groups[] = {
1422 &mdev_type_group1,
1423 &mdev_type_group2,
1424 NULL,
1425 };
1426
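/*
 * Callbacks handed to the mdev core via mdev_register_device() in
 * mtty_dev_init() below.
 */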
1427 struct parent_ops mdev_fops = {
1428 .owner = THIS_MODULE,
1429 .dev_attr_groups = mtty_dev_groups,
1430 .mdev_attr_groups = mdev_dev_groups,
1431 .supported_type_groups = mdev_type_groups,
1432 .create = mtty_create,
1433 .remove = mtty_remove,
1434 .open = mtty_open,
1435 .release = mtty_close,
1436 .read = mtty_read,
1437 .write = mtty_write,
1438 .ioctl = mtty_ioctl,
1439 };
1440
1441 static void mtty_device_release(struct device *dev)
1442 {
1443 dev_dbg(dev, "mtty: released\n");
1444 }
1445
1446 static int __init mtty_dev_init(void)
1447 {
1448 int ret = 0;
1449
1450 pr_info("mtty_dev: %s\n", __func__);
1451
1452 memset(&mtty_dev, 0, sizeof(mtty_dev));
1453
1454 idr_init(&mtty_dev.vd_idr);
1455
1456 ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK, MTTY_NAME);
1457
1458 if (ret < 0) {
1459 pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
1460 return ret;
1461 }
1462
1463 cdev_init(&mtty_dev.vd_cdev, &vd_fops);
1464 cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK);
1465
1466 pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
1467
1468 mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);
1469
1470 if (IS_ERR(mtty_dev.vd_class)) {
1471 pr_err("Error: failed to register mtty_dev class\n");
1472 goto failed1;
1473 }
1474
1475 mtty_dev.dev.class = mtty_dev.vd_class;
1476 mtty_dev.dev.release = mtty_device_release;
1477 dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);
1478
1479 ret = device_register(&mtty_dev.dev);
1480 if (ret)
1481 goto failed2;
1482
1483 if (mdev_register_device(&mtty_dev.dev, &mdev_fops) != 0)
1484 goto failed3;
1485
1486 mutex_init(&mdev_list_lock);
1487 INIT_LIST_HEAD(&mdev_devices_list);
1488
1489 goto all_done;
1490
1491 failed3:
1492
1493 device_unregister(&mtty_dev.dev);
1494 failed2:
1495 class_destroy(mtty_dev.vd_class);
1496
1497 failed1:
1498 cdev_del(&mtty_dev.vd_cdev);
1499 unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
1500
1501 all_done:
1502 return ret;
1503 }
1504
1505 static void __exit mtty_dev_exit(void)
1506 {
1507 mtty_dev.dev.bus = NULL;
1508 mdev_unregister_device(&mtty_dev.dev);
1509
1510 device_unregister(&mtty_dev.dev);
1511 idr_destroy(&mtty_dev.vd_idr);
1512 cdev_del(&mtty_dev.vd_cdev);
1513 unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
1514 class_destroy(mtty_dev.vd_class);
1515 mtty_dev.vd_class = NULL;
1516 pr_info("mtty_dev: Unloaded!\n");
1517 }
1518
1519 module_init(mtty_dev_init)
1520 module_exit(mtty_dev_exit)
1521
1522 MODULE_LICENSE("GPL v2");
1523 MODULE_INFO(supported, "Test driver that simulates a serial port over PCI");
1524 MODULE_VERSION(VERSION_STRING);
1525 MODULE_AUTHOR(DRIVER_AUTHOR);
1526
1527
1528
1529
1530
1531 /* LDV_COMMENT_BEGIN_MAIN */
1532 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
1533
1534 /*###########################################################################*/
1535
1536 /*############## Driver Environment Generator 0.2 output ####################*/
1537
1538 /*###########################################################################*/
1539
1540
1541
1542 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before it is unloaded. */
1543 void ldv_check_final_state(void);
1544
1545 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that the return value is correct. */
1546 void ldv_check_return_value(int res);
1547
1548 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that the return value of the probe() function is correct. */
1549 void ldv_check_return_value_probe(int res);
1550
1551 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
1552 void ldv_initialize(void);
1553
1554 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
1555 void ldv_handler_precall(void);
1556
1557 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
1558 int nondet_int(void);
1559
1560 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
1561 int LDV_IN_INTERRUPT;
1562
1563 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
1564 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
1565
1566
1567
1568 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
1569 /*============================= VARIABLE DECLARATION PART =============================*/
1570 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
1571 /* content: int mtty_create(struct kobject *kobj, struct mdev_device *mdev)*/
1572 /* LDV_COMMENT_BEGIN_PREP */
1573 #define VERSION_STRING "0.1"
1574 #define DRIVER_AUTHOR "NVIDIA Corporation"
1575 #define MTTY_CLASS_NAME "mtty"
1576 #define MTTY_NAME "mtty"
1577 #define MTTY_STRING_LEN 16
1578 #define MTTY_CONFIG_SPACE_SIZE 0xff
1579 #define MTTY_IO_BAR_SIZE 0x8
1580 #define MTTY_MMIO_BAR_SIZE 0x100000
1581 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1582 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1583 #define MAX_FIFO_SIZE 16
1584 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1585 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1586 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1587 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1588 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1589 #define MTTY_VFIO_PCI_OFFSET_MASK \
1590 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1591 #define MAX_MTTYS 24
1592 #if defined(DEBUG_REGS)
1593 #endif
1594 #if defined(DEBUG)
1595 #endif
1596 #if defined(DEBUG_INTR)
1597 #endif
1598 #if defined(DEBUG_INTR)
1599 #endif
1600 #if defined(DEBUG_INTR)
1601 #endif
1602 #if defined(DEBUG_INTR)
1603 #endif
1604 #if defined(DEBUG_INTR)
1605 #endif
1606 #if defined(DEBUG_INTR)
1607 #endif
1608 #if defined(DEBUG)
1609 #endif
1610 #if defined(DEBUG_REGS)
1611 #endif
1612 #if defined(DEBUG_REGS)
1613 #endif
1614 /* LDV_COMMENT_END_PREP */
1615 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_create" */
1616 struct kobject * var_group1;
1617 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_create" */
1618 struct mdev_device * var_group2;
1619 /* LDV_COMMENT_BEGIN_PREP */
1620 #if defined(DEBUG_INTR)
1621 #endif
1622 /* LDV_COMMENT_END_PREP */
1623 /* content: int mtty_remove(struct mdev_device *mdev)*/
1624 /* LDV_COMMENT_BEGIN_PREP */
1625 #define VERSION_STRING "0.1"
1626 #define DRIVER_AUTHOR "NVIDIA Corporation"
1627 #define MTTY_CLASS_NAME "mtty"
1628 #define MTTY_NAME "mtty"
1629 #define MTTY_STRING_LEN 16
1630 #define MTTY_CONFIG_SPACE_SIZE 0xff
1631 #define MTTY_IO_BAR_SIZE 0x8
1632 #define MTTY_MMIO_BAR_SIZE 0x100000
1633 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1634 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1635 #define MAX_FIFO_SIZE 16
1636 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1637 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1638 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1639 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1640 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1641 #define MTTY_VFIO_PCI_OFFSET_MASK \
1642 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1643 #define MAX_MTTYS 24
1644 #if defined(DEBUG_REGS)
1645 #endif
1646 #if defined(DEBUG)
1647 #endif
1648 #if defined(DEBUG_INTR)
1649 #endif
1650 #if defined(DEBUG_INTR)
1651 #endif
1652 #if defined(DEBUG_INTR)
1653 #endif
1654 #if defined(DEBUG_INTR)
1655 #endif
1656 #if defined(DEBUG_INTR)
1657 #endif
1658 #if defined(DEBUG_INTR)
1659 #endif
1660 #if defined(DEBUG)
1661 #endif
1662 #if defined(DEBUG_REGS)
1663 #endif
1664 #if defined(DEBUG_REGS)
1665 #endif
1666 /* LDV_COMMENT_END_PREP */
1667 /* LDV_COMMENT_BEGIN_PREP */
1668 #if defined(DEBUG_INTR)
1669 #endif
1670 /* LDV_COMMENT_END_PREP */
1671 /* content: int mtty_open(struct mdev_device *mdev)*/
1672 /* LDV_COMMENT_BEGIN_PREP */
1673 #define VERSION_STRING "0.1"
1674 #define DRIVER_AUTHOR "NVIDIA Corporation"
1675 #define MTTY_CLASS_NAME "mtty"
1676 #define MTTY_NAME "mtty"
1677 #define MTTY_STRING_LEN 16
1678 #define MTTY_CONFIG_SPACE_SIZE 0xff
1679 #define MTTY_IO_BAR_SIZE 0x8
1680 #define MTTY_MMIO_BAR_SIZE 0x100000
1681 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1682 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1683 #define MAX_FIFO_SIZE 16
1684 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1685 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1686 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1687 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1688 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1689 #define MTTY_VFIO_PCI_OFFSET_MASK \
1690 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1691 #define MAX_MTTYS 24
1692 #if defined(DEBUG_REGS)
1693 #endif
1694 #if defined(DEBUG)
1695 #endif
1696 #if defined(DEBUG_INTR)
1697 #endif
1698 #if defined(DEBUG_INTR)
1699 #endif
1700 #if defined(DEBUG_INTR)
1701 #endif
1702 #if defined(DEBUG_INTR)
1703 #endif
1704 #if defined(DEBUG_INTR)
1705 #endif
1706 #if defined(DEBUG_INTR)
1707 #endif
1708 #if defined(DEBUG)
1709 #endif
1710 #if defined(DEBUG_REGS)
1711 #endif
1712 #if defined(DEBUG_REGS)
1713 #endif
1714 #if defined(DEBUG_INTR)
1715 #endif
1716 /* LDV_COMMENT_END_PREP */
1717 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "mtty_open" */
1718 int res_mtty_open_19;
1719 /* content: void mtty_close(struct mdev_device *mdev)*/
1720 /* LDV_COMMENT_BEGIN_PREP */
1721 #define VERSION_STRING "0.1"
1722 #define DRIVER_AUTHOR "NVIDIA Corporation"
1723 #define MTTY_CLASS_NAME "mtty"
1724 #define MTTY_NAME "mtty"
1725 #define MTTY_STRING_LEN 16
1726 #define MTTY_CONFIG_SPACE_SIZE 0xff
1727 #define MTTY_IO_BAR_SIZE 0x8
1728 #define MTTY_MMIO_BAR_SIZE 0x100000
1729 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1730 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1731 #define MAX_FIFO_SIZE 16
1732 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1733 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1734 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1735 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1736 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1737 #define MTTY_VFIO_PCI_OFFSET_MASK \
1738 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1739 #define MAX_MTTYS 24
1740 #if defined(DEBUG_REGS)
1741 #endif
1742 #if defined(DEBUG)
1743 #endif
1744 #if defined(DEBUG_INTR)
1745 #endif
1746 #if defined(DEBUG_INTR)
1747 #endif
1748 #if defined(DEBUG_INTR)
1749 #endif
1750 #if defined(DEBUG_INTR)
1751 #endif
1752 #if defined(DEBUG_INTR)
1753 #endif
1754 #if defined(DEBUG_INTR)
1755 #endif
1756 #if defined(DEBUG)
1757 #endif
1758 #if defined(DEBUG_REGS)
1759 #endif
1760 #if defined(DEBUG_REGS)
1761 #endif
1762 #if defined(DEBUG_INTR)
1763 #endif
1764 /* LDV_COMMENT_END_PREP */
1765 /* content: ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count, loff_t *ppos)*/
1766 /* LDV_COMMENT_BEGIN_PREP */
1767 #define VERSION_STRING "0.1"
1768 #define DRIVER_AUTHOR "NVIDIA Corporation"
1769 #define MTTY_CLASS_NAME "mtty"
1770 #define MTTY_NAME "mtty"
1771 #define MTTY_STRING_LEN 16
1772 #define MTTY_CONFIG_SPACE_SIZE 0xff
1773 #define MTTY_IO_BAR_SIZE 0x8
1774 #define MTTY_MMIO_BAR_SIZE 0x100000
1775 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1776 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1777 #define MAX_FIFO_SIZE 16
1778 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1779 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1780 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1781 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1782 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1783 #define MTTY_VFIO_PCI_OFFSET_MASK \
1784 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1785 #define MAX_MTTYS 24
1786 #if defined(DEBUG_REGS)
1787 #endif
1788 #if defined(DEBUG)
1789 #endif
1790 #if defined(DEBUG_INTR)
1791 #endif
1792 #if defined(DEBUG_INTR)
1793 #endif
1794 #if defined(DEBUG_INTR)
1795 #endif
1796 #if defined(DEBUG_INTR)
1797 #endif
1798 #if defined(DEBUG_INTR)
1799 #endif
1800 #if defined(DEBUG_INTR)
1801 #endif
1802 #if defined(DEBUG)
1803 #endif
1804 #if defined(DEBUG_REGS)
1805 #endif
1806 #if defined(DEBUG_REGS)
1807 #endif
1808 /* LDV_COMMENT_END_PREP */
1809 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_read" */
1810 char __user * var_mtty_read_11_p1;
1811 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_read" */
1812 size_t var_mtty_read_11_p2;
1813 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_read" */
1814 loff_t * var_mtty_read_11_p3;
1815 /* LDV_COMMENT_BEGIN_PREP */
1816 #if defined(DEBUG_INTR)
1817 #endif
1818 /* LDV_COMMENT_END_PREP */
1819 /* content: ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf, size_t count, loff_t *ppos)*/
1820 /* LDV_COMMENT_BEGIN_PREP */
1821 #define VERSION_STRING "0.1"
1822 #define DRIVER_AUTHOR "NVIDIA Corporation"
1823 #define MTTY_CLASS_NAME "mtty"
1824 #define MTTY_NAME "mtty"
1825 #define MTTY_STRING_LEN 16
1826 #define MTTY_CONFIG_SPACE_SIZE 0xff
1827 #define MTTY_IO_BAR_SIZE 0x8
1828 #define MTTY_MMIO_BAR_SIZE 0x100000
1829 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1830 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1831 #define MAX_FIFO_SIZE 16
1832 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1833 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1834 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1835 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1836 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1837 #define MTTY_VFIO_PCI_OFFSET_MASK \
1838 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1839 #define MAX_MTTYS 24
1840 #if defined(DEBUG_REGS)
1841 #endif
1842 #if defined(DEBUG)
1843 #endif
1844 #if defined(DEBUG_INTR)
1845 #endif
1846 #if defined(DEBUG_INTR)
1847 #endif
1848 #if defined(DEBUG_INTR)
1849 #endif
1850 #if defined(DEBUG_INTR)
1851 #endif
1852 #if defined(DEBUG_INTR)
1853 #endif
1854 #if defined(DEBUG_INTR)
1855 #endif
1856 #if defined(DEBUG)
1857 #endif
1858 #if defined(DEBUG_REGS)
1859 #endif
1860 #if defined(DEBUG_REGS)
1861 #endif
1862 /* LDV_COMMENT_END_PREP */
1863 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_write" */
1864 const char __user * var_mtty_write_12_p1;
1865 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_write" */
1866 size_t var_mtty_write_12_p2;
1867 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_write" */
1868 loff_t * var_mtty_write_12_p3;
1869 /* LDV_COMMENT_BEGIN_PREP */
1870 #if defined(DEBUG_INTR)
1871 #endif
1872 /* LDV_COMMENT_END_PREP */
1873 /* content: static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd, unsigned long arg)*/
1874 /* LDV_COMMENT_BEGIN_PREP */
1875 #define VERSION_STRING "0.1"
1876 #define DRIVER_AUTHOR "NVIDIA Corporation"
1877 #define MTTY_CLASS_NAME "mtty"
1878 #define MTTY_NAME "mtty"
1879 #define MTTY_STRING_LEN 16
1880 #define MTTY_CONFIG_SPACE_SIZE 0xff
1881 #define MTTY_IO_BAR_SIZE 0x8
1882 #define MTTY_MMIO_BAR_SIZE 0x100000
1883 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1884 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1885 #define MAX_FIFO_SIZE 16
1886 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1887 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1888 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1889 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1890 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1891 #define MTTY_VFIO_PCI_OFFSET_MASK \
1892 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1893 #define MAX_MTTYS 24
1894 #if defined(DEBUG_REGS)
1895 #endif
1896 #if defined(DEBUG)
1897 #endif
1898 #if defined(DEBUG_INTR)
1899 #endif
1900 #if defined(DEBUG_INTR)
1901 #endif
1902 #if defined(DEBUG_INTR)
1903 #endif
1904 #if defined(DEBUG_INTR)
1905 #endif
1906 #if defined(DEBUG_INTR)
1907 #endif
1908 #if defined(DEBUG_INTR)
1909 #endif
1910 #if defined(DEBUG)
1911 #endif
1912 #if defined(DEBUG_REGS)
1913 #endif
1914 #if defined(DEBUG_REGS)
1915 #endif
1916 #if defined(DEBUG_INTR)
1917 #endif
1918 /* LDV_COMMENT_END_PREP */
1919 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_ioctl" */
1920 unsigned int var_mtty_ioctl_18_p1;
1921 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "mtty_ioctl" */
1922 unsigned long var_mtty_ioctl_18_p2;
1923
1924
1925
1926
1927 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
1928 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
1929 /*============================= VARIABLE INITIALIZING PART =============================*/
1930 LDV_IN_INTERRUPT=1;
1931
1932
1933
1934
1935 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
1936 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
1937 /*============================= FUNCTION CALL SECTION =============================*/
1938 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
1939 ldv_initialize();
1940
1941 /** INIT: init_type: ST_MODULE_INIT **/
1942 /* content: static int __init mtty_dev_init(void)*/
1943 /* LDV_COMMENT_BEGIN_PREP */
1944 #define VERSION_STRING "0.1"
1945 #define DRIVER_AUTHOR "NVIDIA Corporation"
1946 #define MTTY_CLASS_NAME "mtty"
1947 #define MTTY_NAME "mtty"
1948 #define MTTY_STRING_LEN 16
1949 #define MTTY_CONFIG_SPACE_SIZE 0xff
1950 #define MTTY_IO_BAR_SIZE 0x8
1951 #define MTTY_MMIO_BAR_SIZE 0x100000
1952 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
1953 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
1954 #define MAX_FIFO_SIZE 16
1955 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
1956 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
1957 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
1958 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
1959 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
1960 #define MTTY_VFIO_PCI_OFFSET_MASK \
1961 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
1962 #define MAX_MTTYS 24
1963 #if defined(DEBUG_REGS)
1964 #endif
1965 #if defined(DEBUG)
1966 #endif
1967 #if defined(DEBUG_INTR)
1968 #endif
1969 #if defined(DEBUG_INTR)
1970 #endif
1971 #if defined(DEBUG_INTR)
1972 #endif
1973 #if defined(DEBUG_INTR)
1974 #endif
1975 #if defined(DEBUG_INTR)
1976 #endif
1977 #if defined(DEBUG_INTR)
1978 #endif
1979 #if defined(DEBUG)
1980 #endif
1981 #if defined(DEBUG_REGS)
1982 #endif
1983 #if defined(DEBUG_REGS)
1984 #endif
1985 #if defined(DEBUG_INTR)
1986 #endif
1987 /* LDV_COMMENT_END_PREP */
1988 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver init function after the driver is loaded into the kernel. This function is declared with "module_init(function name)". */
1989 ldv_handler_precall();
1990 if(mtty_dev_init())
1991 goto ldv_final;
1992 int ldv_s_mdev_fops_parent_ops = 0;
1993
1994
1995
1996 while( nondet_int()
1997 || !(ldv_s_mdev_fops_parent_ops == 0)
1998 ) {
1999
2000 switch(nondet_int()) {
2001
2002 case 0: {
2003
2004 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
2005 if(ldv_s_mdev_fops_parent_ops==0) {
2006
2007 /* content: int mtty_open(struct mdev_device *mdev)*/
2008 /* LDV_COMMENT_BEGIN_PREP */
2009 #define VERSION_STRING "0.1"
2010 #define DRIVER_AUTHOR "NVIDIA Corporation"
2011 #define MTTY_CLASS_NAME "mtty"
2012 #define MTTY_NAME "mtty"
2013 #define MTTY_STRING_LEN 16
2014 #define MTTY_CONFIG_SPACE_SIZE 0xff
2015 #define MTTY_IO_BAR_SIZE 0x8
2016 #define MTTY_MMIO_BAR_SIZE 0x100000
2017 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2018 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2019 #define MAX_FIFO_SIZE 16
2020 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2021 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2022 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2023 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2024 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2025 #define MTTY_VFIO_PCI_OFFSET_MASK \
2026 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2027 #define MAX_MTTYS 24
2028 #if defined(DEBUG_REGS)
2029 #endif
2030 #if defined(DEBUG)
2031 #endif
2032 #if defined(DEBUG_INTR)
2033 #endif
2034 #if defined(DEBUG_INTR)
2035 #endif
2036 #if defined(DEBUG_INTR)
2037 #endif
2038 #if defined(DEBUG_INTR)
2039 #endif
2040 #if defined(DEBUG_INTR)
2041 #endif
2042 #if defined(DEBUG_INTR)
2043 #endif
2044 #if defined(DEBUG)
2045 #endif
2046 #if defined(DEBUG_REGS)
2047 #endif
2048 #if defined(DEBUG_REGS)
2049 #endif
2050 #if defined(DEBUG_INTR)
2051 #endif
2052 /* LDV_COMMENT_END_PREP */
2053 /* LDV_COMMENT_FUNCTION_CALL Function from field "open" from driver structure with callbacks "mdev_fops". Standard check of the function's return value. */
2054 ldv_handler_precall();
2055 res_mtty_open_19 = mtty_open( var_group2);
2056 ldv_check_return_value(res_mtty_open_19);
2057 if(res_mtty_open_19)
2058 goto ldv_module_exit;
2059 ldv_s_mdev_fops_parent_ops++;
2060
2061 }
2062
2063 }
2064
2065 break;
2066 case 1: {
2067
2068 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
2069 if(ldv_s_mdev_fops_parent_ops==1) {
2070
2071 /* content: void mtty_close(struct mdev_device *mdev)*/
2072 /* LDV_COMMENT_BEGIN_PREP */
2073 #define VERSION_STRING "0.1"
2074 #define DRIVER_AUTHOR "NVIDIA Corporation"
2075 #define MTTY_CLASS_NAME "mtty"
2076 #define MTTY_NAME "mtty"
2077 #define MTTY_STRING_LEN 16
2078 #define MTTY_CONFIG_SPACE_SIZE 0xff
2079 #define MTTY_IO_BAR_SIZE 0x8
2080 #define MTTY_MMIO_BAR_SIZE 0x100000
2081 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2082 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2083 #define MAX_FIFO_SIZE 16
2084 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2085 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2086 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2087 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2088 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2089 #define MTTY_VFIO_PCI_OFFSET_MASK \
2090 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2091 #define MAX_MTTYS 24
2092 #if defined(DEBUG_REGS)
2093 #endif
2094 #if defined(DEBUG)
2095 #endif
2096 #if defined(DEBUG_INTR)
2097 #endif
2098 #if defined(DEBUG_INTR)
2099 #endif
2100 #if defined(DEBUG_INTR)
2101 #endif
2102 #if defined(DEBUG_INTR)
2103 #endif
2104 #if defined(DEBUG_INTR)
2105 #endif
2106 #if defined(DEBUG_INTR)
2107 #endif
2108 #if defined(DEBUG)
2109 #endif
2110 #if defined(DEBUG_REGS)
2111 #endif
2112 #if defined(DEBUG_REGS)
2113 #endif
2114 #if defined(DEBUG_INTR)
2115 #endif
2116 /* LDV_COMMENT_END_PREP */
2117 /* LDV_COMMENT_FUNCTION_CALL Function from field "release" from driver structure with callbacks "mdev_fops" */
2118 ldv_handler_precall();
2119 mtty_close( var_group2);
2120 ldv_s_mdev_fops_parent_ops++;
2121
2122 }
2123
2124 }
2125
2126 break;
2127 case 2: {
2128
2129 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
2130 if(ldv_s_mdev_fops_parent_ops==2) {
2131
2132 /* content: int mtty_remove(struct mdev_device *mdev)*/
2133 /* LDV_COMMENT_BEGIN_PREP */
2134 #define VERSION_STRING "0.1"
2135 #define DRIVER_AUTHOR "NVIDIA Corporation"
2136 #define MTTY_CLASS_NAME "mtty"
2137 #define MTTY_NAME "mtty"
2138 #define MTTY_STRING_LEN 16
2139 #define MTTY_CONFIG_SPACE_SIZE 0xff
2140 #define MTTY_IO_BAR_SIZE 0x8
2141 #define MTTY_MMIO_BAR_SIZE 0x100000
2142 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2143 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2144 #define MAX_FIFO_SIZE 16
2145 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2146 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2147 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2148 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2149 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2150 #define MTTY_VFIO_PCI_OFFSET_MASK \
2151 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2152 #define MAX_MTTYS 24
2153 #if defined(DEBUG_REGS)
2154 #endif
2155 #if defined(DEBUG)
2156 #endif
2157 #if defined(DEBUG_INTR)
2158 #endif
2159 #if defined(DEBUG_INTR)
2160 #endif
2161 #if defined(DEBUG_INTR)
2162 #endif
2163 #if defined(DEBUG_INTR)
2164 #endif
2165 #if defined(DEBUG_INTR)
2166 #endif
2167 #if defined(DEBUG_INTR)
2168 #endif
2169 #if defined(DEBUG)
2170 #endif
2171 #if defined(DEBUG_REGS)
2172 #endif
2173 #if defined(DEBUG_REGS)
2174 #endif
2175 /* LDV_COMMENT_END_PREP */
2176 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "mdev_fops" */
2177 ldv_handler_precall();
2178 mtty_remove( var_group2);
2179 /* LDV_COMMENT_BEGIN_PREP */
2180 #if defined(DEBUG_INTR)
2181 #endif
2182 /* LDV_COMMENT_END_PREP */
2183 ldv_s_mdev_fops_parent_ops=0;
2184
2185 }
2186
2187 }
2188
2189 break;
2190 case 3: {
2191
2192 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
2193
2194
2195 /* content: int mtty_create(struct kobject *kobj, struct mdev_device *mdev)*/
2196 /* LDV_COMMENT_BEGIN_PREP */
2197 #define VERSION_STRING "0.1"
2198 #define DRIVER_AUTHOR "NVIDIA Corporation"
2199 #define MTTY_CLASS_NAME "mtty"
2200 #define MTTY_NAME "mtty"
2201 #define MTTY_STRING_LEN 16
2202 #define MTTY_CONFIG_SPACE_SIZE 0xff
2203 #define MTTY_IO_BAR_SIZE 0x8
2204 #define MTTY_MMIO_BAR_SIZE 0x100000
2205 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2206 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2207 #define MAX_FIFO_SIZE 16
2208 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2209 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2210 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2211 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2212 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2213 #define MTTY_VFIO_PCI_OFFSET_MASK \
2214 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2215 #define MAX_MTTYS 24
2216 #if defined(DEBUG_REGS)
2217 #endif
2218 #if defined(DEBUG)
2219 #endif
2220 #if defined(DEBUG_INTR)
2221 #endif
2222 #if defined(DEBUG_INTR)
2223 #endif
2224 #if defined(DEBUG_INTR)
2225 #endif
2226 #if defined(DEBUG_INTR)
2227 #endif
2228 #if defined(DEBUG_INTR)
2229 #endif
2230 #if defined(DEBUG_INTR)
2231 #endif
2232 #if defined(DEBUG)
2233 #endif
2234 #if defined(DEBUG_REGS)
2235 #endif
2236 #if defined(DEBUG_REGS)
2237 #endif
2238 /* LDV_COMMENT_END_PREP */
2239 /* LDV_COMMENT_FUNCTION_CALL Function from field "create" from driver structure with callbacks "mdev_fops" */
2240 ldv_handler_precall();
2241 mtty_create( var_group1, var_group2);
2242 /* LDV_COMMENT_BEGIN_PREP */
2243 #if defined(DEBUG_INTR)
2244 #endif
2245 /* LDV_COMMENT_END_PREP */
2246
2247
2248
2249
2250 }
2251
2252 break;
2253 case 4: {
2254
2255 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
2256
2257
2258 /* content: ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count, loff_t *ppos)*/
2259 /* LDV_COMMENT_BEGIN_PREP */
2260 #define VERSION_STRING "0.1"
2261 #define DRIVER_AUTHOR "NVIDIA Corporation"
2262 #define MTTY_CLASS_NAME "mtty"
2263 #define MTTY_NAME "mtty"
2264 #define MTTY_STRING_LEN 16
2265 #define MTTY_CONFIG_SPACE_SIZE 0xff
2266 #define MTTY_IO_BAR_SIZE 0x8
2267 #define MTTY_MMIO_BAR_SIZE 0x100000
2268 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2269 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2270 #define MAX_FIFO_SIZE 16
2271 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2272 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2273 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2274 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2275 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2276 #define MTTY_VFIO_PCI_OFFSET_MASK \
2277 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2278 #define MAX_MTTYS 24
2279 #if defined(DEBUG_REGS)
2280 #endif
2281 #if defined(DEBUG)
2282 #endif
2283 #if defined(DEBUG_INTR)
2284 #endif
2285 #if defined(DEBUG_INTR)
2286 #endif
2287 #if defined(DEBUG_INTR)
2288 #endif
2289 #if defined(DEBUG_INTR)
2290 #endif
2291 #if defined(DEBUG_INTR)
2292 #endif
2293 #if defined(DEBUG_INTR)
2294 #endif
2295 #if defined(DEBUG)
2296 #endif
2297 #if defined(DEBUG_REGS)
2298 #endif
2299 #if defined(DEBUG_REGS)
2300 #endif
2301 /* LDV_COMMENT_END_PREP */
2302 /* LDV_COMMENT_FUNCTION_CALL Function from field "read" from driver structure with callbacks "mdev_fops" */
2303 ldv_handler_precall();
2304 mtty_read( var_group2, var_mtty_read_11_p1, var_mtty_read_11_p2, var_mtty_read_11_p3);
2305 /* LDV_COMMENT_BEGIN_PREP */
2306 #if defined(DEBUG_INTR)
2307 #endif
2308 /* LDV_COMMENT_END_PREP */
2309
2310
2311
2312
2313 }
2314
2315 break;
2316 case 5: {
2317
2318 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
2319
2320
2321 /* content: ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf, size_t count, loff_t *ppos)*/
2322 /* LDV_COMMENT_BEGIN_PREP */
2323 #define VERSION_STRING "0.1"
2324 #define DRIVER_AUTHOR "NVIDIA Corporation"
2325 #define MTTY_CLASS_NAME "mtty"
2326 #define MTTY_NAME "mtty"
2327 #define MTTY_STRING_LEN 16
2328 #define MTTY_CONFIG_SPACE_SIZE 0xff
2329 #define MTTY_IO_BAR_SIZE 0x8
2330 #define MTTY_MMIO_BAR_SIZE 0x100000
2331 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2332 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2333 #define MAX_FIFO_SIZE 16
2334 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2335 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2336 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2337 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2338 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2339 #define MTTY_VFIO_PCI_OFFSET_MASK \
2340 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2341 #define MAX_MTTYS 24
2342 #if defined(DEBUG_REGS)
2343 #endif
2344 #if defined(DEBUG)
2345 #endif
2346 #if defined(DEBUG_INTR)
2347 #endif
2348 #if defined(DEBUG_INTR)
2349 #endif
2350 #if defined(DEBUG_INTR)
2351 #endif
2352 #if defined(DEBUG_INTR)
2353 #endif
2354 #if defined(DEBUG_INTR)
2355 #endif
2356 #if defined(DEBUG_INTR)
2357 #endif
2358 #if defined(DEBUG)
2359 #endif
2360 #if defined(DEBUG_REGS)
2361 #endif
2362 #if defined(DEBUG_REGS)
2363 #endif
2364 /* LDV_COMMENT_END_PREP */
2365 /* LDV_COMMENT_FUNCTION_CALL Function from field "write" from driver structure with callbacks "mdev_fops" */
2366 ldv_handler_precall();
2367 mtty_write( var_group2, var_mtty_write_12_p1, var_mtty_write_12_p2, var_mtty_write_12_p3);
2368 /* LDV_COMMENT_BEGIN_PREP */
2369 #if defined(DEBUG_INTR)
2370 #endif
2371 /* LDV_COMMENT_END_PREP */
2372
2373
2374
2375
2376 }
2377
2378 break;
2379 case 6: {
2380
2381 /** STRUCT: struct type: parent_ops, struct name: mdev_fops **/
2382
2383
2384 /* content: static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd, unsigned long arg)*/
2385 /* LDV_COMMENT_BEGIN_PREP */
2386 #define VERSION_STRING "0.1"
2387 #define DRIVER_AUTHOR "NVIDIA Corporation"
2388 #define MTTY_CLASS_NAME "mtty"
2389 #define MTTY_NAME "mtty"
2390 #define MTTY_STRING_LEN 16
2391 #define MTTY_CONFIG_SPACE_SIZE 0xff
2392 #define MTTY_IO_BAR_SIZE 0x8
2393 #define MTTY_MMIO_BAR_SIZE 0x100000
2394 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2395 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2396 #define MAX_FIFO_SIZE 16
2397 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2398 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2399 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2400 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2401 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2402 #define MTTY_VFIO_PCI_OFFSET_MASK \
2403 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2404 #define MAX_MTTYS 24
2405 #if defined(DEBUG_REGS)
2406 #endif
2407 #if defined(DEBUG)
2408 #endif
2409 #if defined(DEBUG_INTR)
2410 #endif
2411 #if defined(DEBUG_INTR)
2412 #endif
2413 #if defined(DEBUG_INTR)
2414 #endif
2415 #if defined(DEBUG_INTR)
2416 #endif
2417 #if defined(DEBUG_INTR)
2418 #endif
2419 #if defined(DEBUG_INTR)
2420 #endif
2421 #if defined(DEBUG)
2422 #endif
2423 #if defined(DEBUG_REGS)
2424 #endif
2425 #if defined(DEBUG_REGS)
2426 #endif
2427 #if defined(DEBUG_INTR)
2428 #endif
2429 /* LDV_COMMENT_END_PREP */
2430 /* LDV_COMMENT_FUNCTION_CALL Function from field "ioctl" from driver structure with callbacks "mdev_fops" */
2431 ldv_handler_precall();
2432 mtty_ioctl( var_group2, var_mtty_ioctl_18_p1, var_mtty_ioctl_18_p2);
2433
2434
2435
2436
2437 }
2438
2439 break;
2440 default: break;
2441
2442 }
2443
2444 }
2445
2446 ldv_module_exit:
2447
2448 /** INIT: init_type: ST_MODULE_EXIT **/
2449 /* content: static void __exit mtty_dev_exit(void)*/
2450 /* LDV_COMMENT_BEGIN_PREP */
2451 #define VERSION_STRING "0.1"
2452 #define DRIVER_AUTHOR "NVIDIA Corporation"
2453 #define MTTY_CLASS_NAME "mtty"
2454 #define MTTY_NAME "mtty"
2455 #define MTTY_STRING_LEN 16
2456 #define MTTY_CONFIG_SPACE_SIZE 0xff
2457 #define MTTY_IO_BAR_SIZE 0x8
2458 #define MTTY_MMIO_BAR_SIZE 0x100000
2459 #define STORE_LE16(addr, val) (*(u16 *)addr = val)
2460 #define STORE_LE32(addr, val) (*(u32 *)addr = val)
2461 #define MAX_FIFO_SIZE 16
2462 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
2463 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
2464 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
2465 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
2466 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
2467 #define MTTY_VFIO_PCI_OFFSET_MASK \
2468 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
2469 #define MAX_MTTYS 24
2470 #if defined(DEBUG_REGS)
2471 #endif
2472 #if defined(DEBUG)
2473 #endif
2474 #if defined(DEBUG_INTR)
2475 #endif
2476 #if defined(DEBUG_INTR)
2477 #endif
2478 #if defined(DEBUG_INTR)
2479 #endif
2480 #if defined(DEBUG_INTR)
2481 #endif
2482 #if defined(DEBUG_INTR)
2483 #endif
2484 #if defined(DEBUG_INTR)
2485 #endif
2486 #if defined(DEBUG)
2487 #endif
2488 #if defined(DEBUG_REGS)
2489 #endif
2490 #if defined(DEBUG_REGS)
2491 #endif
2492 #if defined(DEBUG_INTR)
2493 #endif
2494 /* LDV_COMMENT_END_PREP */
2495 /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver exit function before the driver is unloaded from the kernel. This function is declared with "module_exit(function name)". */
2496 ldv_handler_precall();
2497 mtty_dev_exit();
2498
2499 /* LDV_COMMENT_FUNCTION_CALL Check that all resources and locks are correctly released before the driver is unloaded. */
2500 ldv_final: ldv_check_final_state();
2501
2502 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
2503 return;
2504
2505 }
2506 #endif
2507
2508 /* LDV_COMMENT_END_MAIN */
2509
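/*
 * Condensed, hand-written sketch (not part of the generated harness) of the
 * scenario model that the main() above implements: a nondeterministic loop
 * invokes the mdev_fops callbacks in arbitrary interleavings, while the
 * counter ldv_s_mdev_fops_parent_ops enforces the open -> release -> remove
 * ordering (in the generated code a failed open additionally jumps straight
 * to the module exit label).  Names below refer to declarations already
 * present in the harness above.
 */
static void ldv_scenario_sketch(struct mdev_device *mdev)
{
        int state = 0;                  /* mirrors ldv_s_mdev_fops_parent_ops */

        while (nondet_int() || state != 0) {
                switch (nondet_int()) {
                case 0:                 /* open: only legal in state 0 */
                        if (state == 0 && !mtty_open(mdev))
                                state = 1;
                        break;
                case 1:                 /* release: only after a successful open */
                        if (state == 1) {
                                mtty_close(mdev);
                                state = 2;
                        }
                        break;
                case 2:                 /* remove: only after release */
                        if (state == 2) {
                                mtty_remove(mdev);
                                state = 0;
                        }
                        break;
                default:                /* create/read/write/ioctl may fire at any time */
                        break;
                }
        }
}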
2510 #line 21 "/work/ldvuser/ref_launch/work/current--X--samples--X--defaultlinux-4.10-rc1.tar.xz--X--106_1a--X--cpachecker/linux-4.10-rc1.tar.xz/csd_deg_dscv/23/dscv_tempdir/dscv/ri/106_1a/samples/vfio-mdev/mtty.o.c.prepared" 1
2
3 #include <verifier/rcv.h>
4 #include <kernel-model/ERR.inc>
5
6 // There are 2 possible states of usb gadget class registration.
7 enum
8 {
9 LDV_USB_GADGET_CLASS_ZERO_STATE, // Usb gadget class is not registered.
10 LDV_USB_GADGET_CLASS_REGISTERED // Usb gadget class is registered.
11 };
12
13 // There are 2 possible states of char device region registration.
14 enum
15 {
16 LDV_USB_GADGET_CHRDEV_ZERO_STATE, // Char device region is not registered for usb gadget.
17 LDV_USB_GADGET_CHRDEV_REGISTERED // Char device region is registered for usb gadget.
18 };
19
20 // There are 2 possible states of usb gadget registration.
21 enum
22 {
23 LDV_USB_GADGET_ZERO_STATE, // Usb gadget is not registered.
24 LDV_USB_GADGET_REGISTERED // Usb gadget is registered.
25 };
26
27 /* LDV_COMMENT_CHANGE_STATE Usb gadget class is not registered at the beginning. */
28 int ldv_usb_gadget_class = LDV_USB_GADGET_CLASS_ZERO_STATE;
29
30 /* LDV_COMMENT_CHANGE_STATE Char device region is not registered at the beginning. */
31 int ldv_usb_gadget_chrdev = LDV_USB_GADGET_CHRDEV_ZERO_STATE;
32
33 /* LDV_COMMENT_CHANGE_STATE Usb gadget is not registered at the beginning. */
34 int ldv_usb_gadget = LDV_USB_GADGET_ZERO_STATE;
35
36
37 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_create_class') Check that usb gadget class was not registered. Then create and register class for it. */
38 void *ldv_create_class(void)
39 {
40 void *is_got;
41
42 /* LDV_COMMENT_OTHER Obtain the class pointer in a nondeterministic way. */
43 is_got = ldv_undef_ptr();
44
45 /* LDV_COMMENT_ASSERT Register the class only if no error occurred. */
46 if ((unsigned long)is_got <= LDV_PTR_MAX)
47 {
48 /* LDV_COMMENT_ASSERT Registering usb gadget class is only allowed if usb gadget is not registered. */
49 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
50 /* LDV_COMMENT_ASSERT Check that usb gadget class is unregistered. */
51 ldv_assert(ldv_usb_gadget_class == LDV_USB_GADGET_CLASS_ZERO_STATE);
52 /* LDV_COMMENT_CHANGE_STATE Register class for usb gadget. */
53 ldv_usb_gadget_class = LDV_USB_GADGET_CLASS_REGISTERED;
54 }
55
56 /* LDV_COMMENT_RETURN Return the obtained class pointer. */
57 return is_got;
58 }
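/*
 * Illustration only (hypothetical call site, not taken from the driver):
 * after LDV instrumentation a class_create()-style call is assumed to behave
 * like ldv_create_class() above, and the usual IS_ERR() check corresponds to
 * comparing the returned pointer against LDV_PTR_MAX.
 */
static int ldv_example_create_class(void)
{
        void *cls = ldv_create_class();          /* models class_create() */

        if ((unsigned long)cls > LDV_PTR_MAX)    /* corresponds to IS_ERR(cls) */
                return -1;                       /* the driver would propagate PTR_ERR(cls) */

        /* here ldv_usb_gadget_class == LDV_USB_GADGET_CLASS_REGISTERED */
        return 0;
}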
59
60 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_register_class') Check that usb gadget class was not registered and register class for it. */
61 int ldv_register_class(void)
62 {
63 int is_reg;
64
65 /* LDV_COMMENT_OTHER Register gadget class in the nondeterministic way. */
66 is_reg = ldv_undef_int_nonpositive();
67
68 /* LDV_COMMENT_ASSERT Register the class only if no error occurred. */
69 if (!is_reg)
70 {
71 /* LDV_COMMENT_ASSERT Registering usb gadget class is only allowed if usb gadget is not registered. */
72 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
73 /* LDV_COMMENT_ASSERT Check that usb gadget class is unregistered. */
74 ldv_assert(ldv_usb_gadget_class == LDV_USB_GADGET_CLASS_ZERO_STATE);
75 /* LDV_COMMENT_CHANGE_STATE Register class for usb gadget. */
76 ldv_usb_gadget_class = LDV_USB_GADGET_CLASS_REGISTERED;
77 }
78
79 /* LDV_COMMENT_RETURN Return registration status (0 is success). */
80 return is_reg;
81 }
82
83 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_unregister_class') Check that usb gadget class was registered and unregister it. */
84 void ldv_unregister_class(void)
85 {
86 /* LDV_COMMENT_ASSERT Unregistering usb gadget class is only allowed if usb gadget is not registered. */
87 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
88 /* LDV_COMMENT_ASSERT Check that usb gadget class is registered. */
89 ldv_assert(ldv_usb_gadget_class == LDV_USB_GADGET_CLASS_REGISTERED);
90 /* LDV_COMMENT_CHANGE_STATE Unregister class for usb gadget. */
91 ldv_usb_gadget_class = LDV_USB_GADGET_CLASS_ZERO_STATE;
92 }
93
94 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_register_chrdev') Check that chrdev region was not registered and register it. */
95 int ldv_register_chrdev(int major)
96 {
97 int is_reg;
98
99 /* LDV_COMMENT_OTHER Register chrdev in the nondeterministic way. */
100 is_reg = ldv_undef_int_nonpositive();
101
102 /* LDV_COMMENT_ASSERT Register the chrdev region only if no error occurred. */
103 if (!is_reg)
104 {
105 /* LDV_COMMENT_ASSERT Usb gadget should be unregistered at this point. */
106 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
107 /* LDV_COMMENT_ASSERT Check that chrdev region is unregistered. */
108 ldv_assert(ldv_usb_gadget_chrdev == LDV_USB_GADGET_CHRDEV_ZERO_STATE);
109 /* LDV_COMMENT_CHANGE_STATE Register chrdev region for usb gadget. */
110 ldv_usb_gadget_chrdev = LDV_USB_GADGET_CHRDEV_REGISTERED;
111 if (major == 0)
112 {
113 /* LDV_COMMENT_OTHER Function returns allocated major number. */
114 is_reg = ldv_undef_int();
115 ldv_assume (is_reg > 0);
116 }
117 }
118
119 /* LDV_COMMENT_RETURN Return registration status (0 is success). */
120 return is_reg;
121 }
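/*
 * Illustration only (hypothetical call site, not taken from the driver):
 * with a zero "major" argument the model above returns a dynamically
 * allocated, strictly positive major number on success and a negative
 * error code on failure.
 */
static int ldv_example_register_chrdev(void)
{
        int major = ldv_register_chrdev(0);      /* request a dynamic major */

        if (major < 0)
                return major;                    /* registration failed */

        /* success: major > 0 is guaranteed by ldv_assume(is_reg > 0) above */
        return 0;
}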
122
123 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_register_chrdev_region') Check that chrdev region was not registered and register it. */
124 int ldv_register_chrdev_region(void)
125 {
126 int is_reg;
127
128 /* LDV_COMMENT_OTHER Register chrdev in the nondeterministic way. */
129 is_reg = ldv_undef_int_nonpositive();
130
131 /* LDV_COMMENT_ASSERT Register the chrdev region only if no error occurred. */
132 if (!is_reg)
133 {
134 /* LDV_COMMENT_ASSERT Usb gadget should be unregistered at this point. */
135 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
136 /* LDV_COMMENT_ASSERT Check that chrdev region is unregistered. */
137 ldv_assert(ldv_usb_gadget_chrdev == LDV_USB_GADGET_CHRDEV_ZERO_STATE);
138 /* LDV_COMMENT_CHANGE_STATE Register chrdev region for usb gadget. */
139 ldv_usb_gadget_chrdev = LDV_USB_GADGET_CHRDEV_REGISTERED;
140 }
141
142 /* LDV_COMMENT_RETURN Return registration status (0 is success). */
143 return is_reg;
144 }
145
146 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_unregister_chrdev_region') Check that chrdev region was registered and unregister it. */
147 void ldv_unregister_chrdev_region(void)
148 {
149 /* LDV_COMMENT_ASSERT Usb gadget should not be registered at this point. */
150 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
151 /* LDV_COMMENT_ASSERT Check that chrdev region is registered. */
152 ldv_assert(ldv_usb_gadget_chrdev == LDV_USB_GADGET_CHRDEV_REGISTERED);
153 /* LDV_COMMENT_CHANGE_STATE Unregister chrdev. */
154 ldv_usb_gadget_chrdev = LDV_USB_GADGET_CHRDEV_ZERO_STATE;
155 }
156
157 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_register_usb_gadget') Check that usb gadget was not registered and register it. */
158 int ldv_register_usb_gadget(void)
159 {
160 int is_reg;
161
162 /* LDV_COMMENT_OTHER Register usb gadget in the nondeterministic way. */
163 is_reg = ldv_undef_int_nonpositive();
164
165 /* LDV_COMMENT_ASSERT Register the usb gadget only if no error occurred. */
166 if (!is_reg)
167 {
168 /* LDV_COMMENT_ASSERT Gadget should not be registered at this point. */
169 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
170 /* LDV_COMMENT_CHANGE_STATE Register usb gadget. */
171 ldv_usb_gadget = LDV_USB_GADGET_REGISTERED;
172 }
173
174 /* LDV_COMMENT_RETURN Return registration status (0 is success). */
175 return is_reg;
176 }
177
178 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_unregister_usb_gadget') Check that usb gadget was registered and unregister it. */
179 void ldv_unregister_usb_gadget(void)
180 {
181 /* LDV_COMMENT_ASSERT Usb gadget should be registered at this point. */
182 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_REGISTERED);
183 /* LDV_COMMENT_CHANGE_STATE Unregister usb gadget. */
184 ldv_usb_gadget = LDV_USB_GADGET_ZERO_STATE;
185 }
186
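/*
 * Illustration only (hypothetical driver code, not taken from mtty.c): the
 * kind of violation this model detects.  If an init function registers the
 * chrdev region but does not unregister it on a later failure path,
 * ldv_usb_gadget_chrdev stays LDV_USB_GADGET_CHRDEV_REGISTERED, and the
 * assertion in ldv_check_final_state() below fails when the module unloads.
 */
static int ldv_example_leaky_init(void)
{
        void *cls;

        if (ldv_register_chrdev_region())        /* step 1: may fail cleanly */
                return -1;

        cls = ldv_create_class();                /* step 2: may also fail */
        if ((unsigned long)cls > LDV_PTR_MAX)
                return -1;                       /* leak in this sketch: the chrdev
                                                    region is never unregistered here */

        return 0;
}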
187 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that usb gadget is fully unregistered at the end. */
188 void ldv_check_final_state(void)
189 {
190 /* LDV_COMMENT_ASSERT Usb gadget class should be unregistered at the end. */
191 ldv_assert(ldv_usb_gadget_class == LDV_USB_GADGET_CLASS_ZERO_STATE);
192 /* LDV_COMMENT_ASSERT Chrdev region should be unregistered at the end. */
193 ldv_assert(ldv_usb_gadget_chrdev == LDV_USB_GADGET_CHRDEV_ZERO_STATE);
194 /* LDV_COMMENT_ASSERT Usb gadget should be unregistered at the end. */
195 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
196 }
1 #ifndef _LDV_ERR_
2 #define _LDV_ERR_
3
4 #include <linux/kernel.h>
5
6 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_is_err') This function returns the result of checking whether the pointer is an error pointer. */
7 bool ldv_is_err(const void *ptr)
8 {
9 /*LDV_COMMENT_RETURN Return whether the pointer value exceeds LDV_PTR_MAX, i.e. encodes an error.*/
10 return ((unsigned long)ptr > LDV_PTR_MAX);
11 }
12
13 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_err_ptr') This function converts an error code into an error pointer. */
14 void* ldv_err_ptr(long error)
15 {
16 /*LDV_COMMENT_RETURN Return error pointer.*/
17 return (void *)(LDV_PTR_MAX - error);
18 }
19
20 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_ptr_err') This function converts an error pointer back into an error code. */
21 long ldv_ptr_err(const void *ptr)
22 {
23 /*LDV_COMMENT_RETURN Return error code.*/
24 return (long)(LDV_PTR_MAX - (unsigned long)ptr);
25 }
26
27 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_is_err_or_null') This function checks whether the pointer is an error pointer or NULL. */
28 bool ldv_is_err_or_null(const void *ptr)
29 {
30 /*LDV_COMMENT_RETURN Return 1 if the pointer is NULL or an error pointer, and 0 otherwise.*/
31 return !ptr || ldv_is_err(ptr);
32 }
33
34 #endif /* _LDV_ERR_ */
1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because some static verifiers (like
9 BLAST) do not accept multiple error labels in a program. */
10 static inline void ldv_error(void)
11 {
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21 LDV_STOP: goto LDV_STOP;
22 }
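/* Note (added for clarity): ldv_assert(expr) makes the verifier report a
   rule violation whenever expr can be false, while ldv_assume(expr) silently
   discards every path on which expr is false. */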
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return nondeterministic negative integer number. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 int ret = ldv_undef_int();
33
34 ldv_assume(ret < 0);
35
36 return ret;
37 }
38 /* Return nondeterministic nonpositive integer number. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 int ret = ldv_undef_int();
42
43 ldv_assume(ret <= 0);
44
45 return ret;
46 }
47
48 /* Add an explicit model for the __builtin_expect GCC built-in. Without the model its
49 return value would be treated as nondeterministic by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 return exp;
53 }
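/* Note (added for clarity): kernel macros such as likely(x)/unlikely(x)
   expand to __builtin_expect(!!(x), 1) and __builtin_expect(!!(x), 0), so
   with this model the verifier evaluates the condition itself instead of
   treating it as a nondeterministic value. */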
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62 ldv_assert(0);
63 }
64
65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */
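As a worked example of the error-pointer encoding used by the models above (a hand-written sketch; ERR_PTR()/IS_ERR()/PTR_ERR() are the kernel macros these model functions stand in for):

static void ldv_errptr_example(void)
{
        void *p = ldv_err_ptr(-12);        /* like ERR_PTR(-ENOMEM): yields
                                              (void *)(LDV_PTR_MAX + 12) */

        if (ldv_is_err(p)) {               /* like IS_ERR(p): true, since the
                                              value is above LDV_PTR_MAX */
                long err = ldv_ptr_err(p); /* like PTR_ERR(p): gives -12 back */
                (void)err;
        }
}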
Here is an explanation of a rule violation found while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to understand whether your driver actually contains an error.
The Error trace column shows a path on which the given rule is violated. You can expand or collapse entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and expand or collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also linked to the related source code: line numbers may be shown as links on the left, and clicking them opens the corresponding lines in the source code.
The Source code column shows the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking it shows that file's content.
Kernel | Module | Rule | Verifier | Verdict | Status | Creation time | Problem description |
linux-4.10-rc1.tar.xz | samples/vfio-mdev/mtty.ko | 106_1a | CPAchecker | Bug | Fixed | 2016-12-31 01:46:58 | L0259 |
Comment
Reported: 31 Dec 2016
[Back to top]