Bug
Error # 8
Error trace
{ 95 struct kernel_symbol { unsigned long value; const char *name; } ; 33 struct module ; 19 typedef signed char __s8; 20 typedef unsigned char __u8; 22 typedef short __s16; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 30 typedef unsigned long long __u64; 15 typedef signed char s8; 16 typedef unsigned char u8; 18 typedef short s16; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 40 typedef __kernel_long_t __kernel_suseconds_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 33 typedef __u16 __be16; 35 typedef __u32 __be32; 40 typedef __u32 __wsum; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 102 typedef __s32 int32_t; 106 typedef __u8 uint8_t; 108 typedef __u32 uint32_t; 111 typedef __u64 uint64_t; 133 typedef unsigned long sector_t; 134 typedef unsigned long blkcnt_t; 147 typedef u64 dma_addr_t; 158 typedef unsigned int gfp_t; 159 typedef unsigned int fmode_t; 160 typedef unsigned int oom_flags_t; 163 typedef u64 phys_addr_t; 168 typedef phys_addr_t resource_size_t; 178 struct __anonstruct_atomic_t_6 { int counter; } ; 178 typedef struct __anonstruct_atomic_t_6 atomic_t; 183 struct __anonstruct_atomic64_t_7 { long counter; } ; 183 typedef struct __anonstruct_atomic64_t_7 atomic64_t; 184 struct list_head { struct list_head *next; struct list_head *prev; } ; 189 struct hlist_node ; 189 struct hlist_head { struct hlist_node *first; } ; 193 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 204 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 65 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ; 59 struct __anonstruct_ldv_1016_9 { unsigned int a; unsigned int b; } ; 59 struct __anonstruct_ldv_1031_10 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 59 union __anonunion_ldv_1032_8 { struct __anonstruct_ldv_1016_9 ldv_1016; struct __anonstruct_ldv_1031_10 ldv_1031; } ; 59 struct desc_struct { union __anonunion_ldv_1032_8 ldv_1032; } ; 12 typedef unsigned long pteval_t; 15 typedef unsigned long pgdval_t; 16 typedef unsigned long pgprotval_t; 18 struct __anonstruct_pte_t_11 { 
pteval_t pte; } ; 18 typedef struct __anonstruct_pte_t_11 pte_t; 20 struct pgprot { pgprotval_t pgprot; } ; 242 typedef struct pgprot pgprot_t; 244 struct __anonstruct_pgd_t_12 { pgdval_t pgd; } ; 244 typedef struct __anonstruct_pgd_t_12 pgd_t; 332 struct page ; 332 typedef struct page *pgtable_t; 340 struct file ; 353 struct seq_file ; 390 struct thread_struct ; 392 struct mm_struct ; 393 struct task_struct ; 394 struct cpumask ; 395 struct paravirt_callee_save { void *func; } ; 196 struct pv_irq_ops { struct paravirt_callee_save save_fl; struct paravirt_callee_save restore_fl; struct paravirt_callee_save irq_disable; struct paravirt_callee_save irq_enable; void (*safe_halt)(); void (*halt)(); void (*adjust_exception_frame)(); } ; 327 struct arch_spinlock ; 18 typedef u16 __ticket_t; 19 typedef u32 __ticketpair_t; 20 struct __raw_tickets { __ticket_t head; __ticket_t tail; } ; 32 union __anonunion_ldv_1452_15 { __ticketpair_t head_tail; struct __raw_tickets tickets; } ; 32 struct arch_spinlock { union __anonunion_ldv_1452_15 ldv_1452; } ; 33 typedef struct arch_spinlock arch_spinlock_t; 34 struct qrwlock { atomic_t cnts; arch_spinlock_t lock; } ; 14 typedef struct qrwlock arch_rwlock_t; 142 typedef void (*ctor_fn_t)(); 48 struct device ; 54 struct net_device ; 400 struct file_operations ; 412 struct completion ; 416 struct pid ; 527 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 102 struct timespec ; 127 struct kernel_vm86_regs { struct pt_regs pt; unsigned short es; unsigned short __esh; unsigned short ds; unsigned short __dsh; unsigned short fs; unsigned short __fsh; unsigned short gs; unsigned short __gsh; } ; 79 union __anonunion_ldv_2961_20 { struct pt_regs *regs; struct kernel_vm86_regs *vm86; } ; 79 struct math_emu_info { long ___orig_eip; union __anonunion_ldv_2961_20 ldv_2961; } ; 306 struct cpumask { unsigned long bits[128U]; } ; 14 typedef struct cpumask cpumask_t; 671 typedef struct cpumask *cpumask_var_t; 162 struct seq_operations ; 294 struct i387_fsave_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 312 struct __anonstruct_ldv_5248_25 { u64 rip; u64 rdp; } ; 312 struct __anonstruct_ldv_5254_26 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 312 union __anonunion_ldv_5255_24 { struct __anonstruct_ldv_5248_25 ldv_5248; struct __anonstruct_ldv_5254_26 ldv_5254; } ; 312 union __anonunion_ldv_5264_27 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 312 struct i387_fxsave_struct { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion_ldv_5255_24 ldv_5255; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion_ldv_5264_27 ldv_5264; } ; 346 struct i387_soft_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ; 367 struct ymmh_struct { u32 ymmh_space[64U]; } ; 372 struct lwp_struct { u8 reserved[128U]; } ; 377 struct bndregs_struct { u64 bndregs[8U]; } ; 381 struct bndcsr_struct { u64 cfg_reg_u; u64 status_reg; } ; 386 struct xsave_hdr_struct { u64 xstate_bv; u64 reserved1[2U]; u64 reserved2[5U]; } ; 392 struct xsave_struct { struct i387_fxsave_struct i387; struct xsave_hdr_struct xsave_hdr; struct ymmh_struct ymmh; struct lwp_struct lwp; struct bndregs_struct bndregs; struct bndcsr_struct bndcsr; } ; 401 union thread_xstate { struct i387_fsave_struct fsave; struct i387_fxsave_struct 
fxsave; struct i387_soft_struct soft; struct xsave_struct xsave; } ; 409 struct fpu { unsigned int last_cpu; unsigned int has_fpu; union thread_xstate *state; } ; 465 struct kmem_cache ; 466 struct perf_event ; 467 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned long usersp; unsigned short es; unsigned short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fs; unsigned long gs; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; struct fpu fpu; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; unsigned char fpu_counter; } ; 23 typedef atomic64_t atomic_long_t; 35 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 26 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct list_head hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 205 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; } ; 530 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct_ldv_6305_31 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion_ldv_6306_30 { struct raw_spinlock rlock; struct __anonstruct_ldv_6305_31 ldv_6305; } ; 33 struct spinlock { union __anonunion_ldv_6306_30 ldv_6306; } ; 76 typedef struct spinlock spinlock_t; 23 struct __anonstruct_rwlock_t_32 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 23 typedef struct __anonstruct_rwlock_t_32 rwlock_t; 135 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ; 51 typedef struct seqcount seqcount_t; 259 struct __anonstruct_seqlock_t_33 { struct seqcount seqcount; spinlock_t lock; } ; 259 typedef struct __anonstruct_seqlock_t_33 seqlock_t; 433 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 13 struct timeval { __kernel_time_t tv_sec; __kernel_suseconds_t tv_usec; } ; 83 struct user_namespace ; 22 struct __anonstruct_kuid_t_34 { uid_t val; } ; 22 typedef struct __anonstruct_kuid_t_34 kuid_t; 27 struct __anonstruct_kgid_t_35 { gid_t val; } ; 27 typedef struct __anonstruct_kgid_t_35 kgid_t; 127 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ; 34 struct __wait_queue_head { 
spinlock_t lock; struct list_head task_list; } ; 39 typedef struct __wait_queue_head wait_queue_head_t; 98 struct __anonstruct_nodemask_t_36 { unsigned long bits[16U]; } ; 98 typedef struct __anonstruct_nodemask_t_36 nodemask_t; 814 struct optimistic_spin_queue ; 815 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; const char *name; void *magic; struct lockdep_map dep_map; } ; 68 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ; 178 struct rw_semaphore ; 179 struct rw_semaphore { long count; raw_spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; struct optimistic_spin_queue *osq; struct lockdep_map dep_map; } ; 174 struct completion { unsigned int done; wait_queue_head_t wait; } ; 105 struct llist_node ; 105 struct llist_head { struct llist_node *first; } ; 64 struct llist_node { struct llist_node *next; } ; 72 struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; struct resource *parent; struct resource *sibling; struct resource *child; } ; 323 union ktime { s64 tv64; } ; 59 typedef union ktime ktime_t; 412 struct tvec_base ; 413 struct timer_list { struct list_head entry; unsigned long expires; struct tvec_base *base; void (*function)(unsigned long); unsigned long data; int slack; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ; 254 struct hrtimer ; 255 enum hrtimer_restart ; 266 struct workqueue_struct ; 267 struct work_struct ; 53 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 106 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device *); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 335 struct wakeup_source ; 546 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; } ; 553 struct dev_pm_qos ; 553 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool ignore_children; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; struct timer_list 
suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ; 614 struct dev_pm_domain { struct dev_pm_ops ops; } ; 22 struct __anonstruct_mm_context_t_101 { void *ldt; int size; unsigned short ia32_compat; struct mutex lock; void *vdso; } ; 22 typedef struct __anonstruct_mm_context_t_101 mm_context_t; 18 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 40 struct rb_root { struct rb_node *rb_node; } ; 87 struct vm_area_struct ; 22 struct bio_vec ; 167 struct notifier_block ; 51 struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; } ; 63 struct blocking_notifier_head { struct rw_semaphore rwsem; struct notifier_block *head; } ; 906 struct ctl_table ; 835 struct nsproxy ; 836 struct ctl_table_root ; 837 struct ctl_table_header ; 838 struct ctl_dir ; 39 typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *); 59 struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; } ; 98 struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; } ; 119 struct ctl_node { struct rb_node node; struct ctl_table_header *header; } ; 124 struct __anonstruct_ldv_14315_129 { struct ctl_table *ctl_table; int used; int count; int nreg; } ; 124 union __anonunion_ldv_14317_128 { struct __anonstruct_ldv_14315_129 ldv_14315; struct callback_head rcu; } ; 124 struct ctl_table_set ; 124 struct ctl_table_header { union __anonunion_ldv_14317_128 ldv_14317; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; } ; 145 struct ctl_dir { struct ctl_table_header header; struct rb_root root; } ; 151 struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; } ; 156 struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *, struct nsproxy *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ; 37 struct cred ; 24 struct inode ; 58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 73 struct __anonstruct_ldv_14561_136 { struct arch_uprobe_task autask; unsigned long vaddr; } ; 73 struct __anonstruct_ldv_14565_137 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 73 union __anonunion_ldv_14566_135 { struct __anonstruct_ldv_14561_136 ldv_14561; struct __anonstruct_ldv_14565_137 ldv_14565; } ; 73 struct 
uprobe ; 73 struct return_instance ; 73 struct uprobe_task { enum uprobe_task_state state; union __anonunion_ldv_14566_135 ldv_14566; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 94 struct xol_area ; 95 struct uprobes_state { struct xol_area *xol_area; } ; 133 struct address_space ; 134 union __anonunion_ldv_14675_138 { struct address_space *mapping; void *s_mem; } ; 134 union __anonunion_ldv_14681_140 { unsigned long index; void *freelist; bool pfmemalloc; } ; 134 struct __anonstruct_ldv_14691_144 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 134 union __anonunion_ldv_14693_143 { atomic_t _mapcount; struct __anonstruct_ldv_14691_144 ldv_14691; int units; } ; 134 struct __anonstruct_ldv_14695_142 { union __anonunion_ldv_14693_143 ldv_14693; atomic_t _count; } ; 134 union __anonunion_ldv_14697_141 { unsigned long counters; struct __anonstruct_ldv_14695_142 ldv_14695; unsigned int active; } ; 134 struct __anonstruct_ldv_14698_139 { union __anonunion_ldv_14681_140 ldv_14681; union __anonunion_ldv_14697_141 ldv_14697; } ; 134 struct __anonstruct_ldv_14705_146 { struct page *next; int pages; int pobjects; } ; 134 struct slab ; 134 union __anonunion_ldv_14710_145 { struct list_head lru; struct __anonstruct_ldv_14705_146 ldv_14705; struct slab *slab_page; struct callback_head callback_head; pgtable_t pmd_huge_pte; } ; 134 union __anonunion_ldv_14716_147 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; struct page *first_page; } ; 134 struct page { unsigned long flags; union __anonunion_ldv_14675_138 ldv_14675; struct __anonstruct_ldv_14698_139 ldv_14698; union __anonunion_ldv_14710_145 ldv_14710; union __anonunion_ldv_14716_147 ldv_14716; unsigned long debug_flags; } ; 187 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 239 struct __anonstruct_linear_149 { struct rb_node rb; unsigned long rb_subtree_last; } ; 239 union __anonunion_shared_148 { struct __anonstruct_linear_149 linear; struct list_head nonlinear; } ; 239 struct anon_vma ; 239 struct vm_operations_struct ; 239 struct mempolicy ; 239 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; union __anonunion_shared_148 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; } ; 311 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 317 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 330 struct task_rss_stat { int events; int count[3U]; } ; 338 struct mm_rss_stat { atomic_long_t count[3U]; } ; 343 struct kioctx_table ; 344 struct linux_binfmt ; 344 struct mmu_notifier_mm ; 344 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head mmlist; unsigned long 
hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long shared_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 48 union __anonunion_ldv_15079_153 { unsigned long bitmap[4U]; struct callback_head callback_head; } ; 48 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion_ldv_15079_153 ldv_15079; } ; 41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ; 124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ; 153 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ; 185 struct dentry ; 186 struct iattr ; 187 struct super_block ; 188 struct file_system_type ; 189 struct kernfs_open_node ; 190 struct kernfs_iattrs ; 213 struct kernfs_root ; 213 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 85 struct kernfs_node ; 85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 89 struct kernfs_ops ; 89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; } ; 95 union __anonunion_ldv_15223_154 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 95 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion_ldv_15223_154 ldv_15223; void *priv; unsigned short flags; umode_t mode; unsigned int ino; struct kernfs_iattrs *iattr; } ; 137 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); } ; 154 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 170 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; int event; struct list_head list; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ; 186 struct 
kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 462 struct sock ; 463 struct kobject ; 464 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 470 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 59 struct bin_attribute ; 60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 67 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 131 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 470 struct kref { atomic_t refcount; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 114 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 122 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 130 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 147 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 222 struct kernel_param ; 227 struct kernel_param_ops { unsigned int flags; int (*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 58 struct kparam_string ; 58 struct kparam_array ; 58 union __anonunion_ldv_15898_155 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 58 struct kernel_param { const char *name; const struct kernel_param_ops *ops; u16 perm; s16 level; union __anonunion_ldv_15898_155 ldv_15898; } ; 70 struct kparam_string { unsigned int maxlen; char *string; } ; 76 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int 
*num; const struct kernel_param_ops *ops; void *elem; } ; 461 struct mod_arch_specific { } ; 36 struct module_param_attrs ; 36 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 46 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 72 struct exception_table_entry ; 205 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 212 struct module_ref { unsigned long incs; unsigned long decs; } ; 226 struct module_sect_attrs ; 226 struct module_notes_attrs ; 226 struct tracepoint ; 226 struct ftrace_event_call ; 226 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); void *module_init; void *module_core; unsigned int init_size; unsigned int core_size; unsigned int init_text_size; unsigned int core_text_size; unsigned int init_ro_size; unsigned int core_ro_size; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; Elf64_Sym *symtab; Elf64_Sym *core_symtab; unsigned int num_symtab; unsigned int core_num_syms; char *strtab; char *core_strtab; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct ftrace_event_call **trace_events; unsigned int num_trace_events; struct list_head source_list; struct list_head target_list; void (*exit)(); struct module_ref *refptr; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 217 struct iovec { void *iov_base; __kernel_size_t iov_len; } ; 27 union __anonunion_ldv_16475_156 { const struct iovec *iov; const struct bio_vec *bvec; } ; 27 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion_ldv_16475_156 ldv_16475; unsigned long nr_segs; } ; 11 typedef unsigned short __kernel_sa_family_t; 23 typedef __kernel_sa_family_t sa_family_t; 24 struct sockaddr { sa_family_t sa_family; char sa_data[14U]; } ; 54 struct poll_table_struct ; 55 struct pipe_inode_info ; 56 struct net ; 73 struct fasync_struct ; 120 struct kiocb ; 57 struct mem_cgroup ; 368 struct kmem_cache_cpu { void **freelist; unsigned long tid; struct page *page; struct page *partial; unsigned int stat[26U]; } ; 48 
struct kmem_cache_order_objects { unsigned long x; } ; 58 struct memcg_cache_params ; 58 struct kmem_cache_node ; 58 struct kmem_cache { struct kmem_cache_cpu *cpu_slab; unsigned long flags; unsigned long min_partial; int size; int object_size; int offset; int cpu_partial; struct kmem_cache_order_objects oo; struct kmem_cache_order_objects max; struct kmem_cache_order_objects min; gfp_t allocflags; int refcount; void (*ctor)(void *); int inuse; int align; int reserved; const char *name; struct list_head list; struct kobject kobj; struct memcg_cache_params *memcg_params; int max_attr_size; struct kset *memcg_kset; int remote_node_defrag_ratio; struct kmem_cache_node *node[1024U]; } ; 501 struct __anonstruct_ldv_17129_158 { struct callback_head callback_head; struct kmem_cache *memcg_caches[0U]; } ; 501 struct __anonstruct_ldv_17135_159 { struct mem_cgroup *memcg; struct list_head list; struct kmem_cache *root_cache; atomic_t nr_pages; } ; 501 union __anonunion_ldv_17136_157 { struct __anonstruct_ldv_17129_158 ldv_17129; struct __anonstruct_ldv_17135_159 ldv_17135; } ; 501 struct memcg_cache_params { bool is_root_cache; union __anonunion_ldv_17136_157 ldv_17136; } ; 62 struct exception_table_entry { int insn; int fixup; } ; 140 struct sk_buff ; 155 struct klist_node ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 67 struct path ; 68 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; struct user_namespace *user_ns; void *private; } ; 35 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ; 196 struct pinctrl ; 197 struct pinctrl_state ; 194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ; 42 struct dma_map_ops ; 42 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ; 11 struct pdev_archdata { } ; 14 struct device_private ; 15 struct device_driver ; 16 struct driver_private ; 17 struct class ; 18 struct subsys_private ; 19 struct bus_type ; 20 struct device_node ; 21 struct iommu_ops ; 22 struct iommu_group ; 60 struct device_attribute ; 60 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ; 138 struct device_type ; 195 struct of_device_id ; 195 struct acpi_device_id ; 195 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device 
*); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ; 321 struct class_attribute ; 321 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ; 414 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ; 482 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ; 510 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ; 640 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ; 649 struct acpi_device ; 650 struct acpi_dev_node { struct acpi_device *companion; } ; 656 struct dma_coherent_mem ; 656 struct cma ; 656 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct dev_pin_info *pins; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct acpi_dev_node acpi_node; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ; 803 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ; 93 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; nodemask_t nodes_to_scan; int nid; } ; 26 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ; 71 struct file_ra_state ; 72 struct user_struct ; 73 struct writeback_control ; 188 struct 
vm_fault { unsigned int flags; unsigned long pgoff; void *virtual_address; struct page *page; unsigned long max_pgoff; pte_t *pte; } ; 221 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); void (*map_pages)(struct vm_area_struct *, struct vm_fault *); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); int (*migrate)(struct vm_area_struct *, const nodemask_t *, const nodemask_t *, unsigned long); int (*remap_pages)(struct vm_area_struct *, unsigned long, unsigned long, unsigned long); } ; 2112 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ; 17 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ; 38 typedef s32 dma_cookie_t; 1153 struct timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; int start_pid; void *start_site; char start_comm[16U]; } ; 132 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t resolution; ktime_t (*get_time)(); ktime_t softirq_time; ktime_t offset; } ; 163 struct hrtimer_cpu_base { raw_spinlock_t lock; unsigned int active_bases; unsigned int clock_was_set; ktime_t expires_next; int hres_active; int hang_detected; unsigned long nr_events; unsigned long nr_retries; unsigned long nr_hangs; ktime_t max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 474 struct dma_attrs { unsigned long flags[1U]; } ; 70 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; 77 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , struct dma_attrs *); void (*free)(struct device *, size_t , void *, dma_addr_t , struct dma_attrs *); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , struct dma_attrs *); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , struct dma_attrs *); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , struct dma_attrs *); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs *); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , struct dma_attrs *); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device 
*, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ; 15 typedef u64 netdev_features_t; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 218 struct plist_head { struct list_head node_list; } ; 84 struct plist_node { int prio; struct list_head prio_list; struct list_head node_list; } ; 4 typedef unsigned long cputime_t; 25 struct sem_undo_list ; 25 struct sysv_sem { struct sem_undo_list *undo_list; } ; 24 struct __anonstruct_sigset_t_162 { unsigned long sig[1U]; } ; 24 typedef struct __anonstruct_sigset_t_162 sigset_t; 25 struct siginfo ; 17 typedef void __signalfn_t(int); 18 typedef __signalfn_t *__sighandler_t; 20 typedef void __restorefn_t(); 21 typedef __restorefn_t *__sigrestore_t; 34 union sigval { int sival_int; void *sival_ptr; } ; 10 typedef union sigval sigval_t; 11 struct __anonstruct__kill_164 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ; 11 struct __anonstruct__timer_165 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ; 11 struct __anonstruct__rt_166 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ; 11 struct __anonstruct__sigchld_167 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ; 11 struct __anonstruct__sigfault_168 { void *_addr; short _addr_lsb; } ; 11 struct __anonstruct__sigpoll_169 { long _band; int _fd; } ; 11 struct __anonstruct__sigsys_170 { void *_call_addr; int _syscall; unsigned int _arch; } ; 11 union __anonunion__sifields_163 { int _pad[28U]; struct __anonstruct__kill_164 _kill; struct __anonstruct__timer_165 _timer; struct __anonstruct__rt_166 _rt; struct __anonstruct__sigchld_167 _sigchld; struct __anonstruct__sigfault_168 _sigfault; struct __anonstruct__sigpoll_169 _sigpoll; struct __anonstruct__sigsys_170 _sigsys; } ; 11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_163 _sifields; } ; 109 typedef struct siginfo siginfo_t; 21 struct sigpending { struct list_head list; sigset_t signal; } ; 246 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ; 260 struct k_sigaction { struct sigaction sa; } ; 459 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; 466 struct pid_namespace ; 466 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 174 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ; 46 struct seccomp_filter ; 47 struct seccomp { int mode; struct seccomp_filter *filter; } ; 40 struct rt_mutex_waiter ; 41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ; 11 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ; 45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ; 39 struct assoc_array_ptr ; 39 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ; 31 typedef int32_t key_serial_t; 34 typedef uint32_t key_perm_t; 
35 struct key ; 36 struct signal_struct ; 37 struct key_type ; 41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ; 123 union __anonunion_ldv_24417_173 { struct list_head graveyard_link; struct rb_node serial_node; } ; 123 struct key_user ; 123 union __anonunion_ldv_24425_174 { time_t expiry; time_t revoked_at; } ; 123 struct __anonstruct_ldv_24438_176 { struct key_type *type; char *description; } ; 123 union __anonunion_ldv_24439_175 { struct keyring_index_key index_key; struct __anonstruct_ldv_24438_176 ldv_24438; } ; 123 union __anonunion_type_data_177 { struct list_head link; unsigned long x[2U]; void *p[2U]; int reject_error; } ; 123 union __anonunion_payload_179 { unsigned long value; void *rcudata; void *data; void *data2[2U]; } ; 123 union __anonunion_ldv_24454_178 { union __anonunion_payload_179 payload; struct assoc_array keys; } ; 123 struct key { atomic_t usage; key_serial_t serial; union __anonunion_ldv_24417_173 ldv_24417; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion_ldv_24425_174 ldv_24425; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion_ldv_24439_175 ldv_24439; union __anonunion_type_data_177 type_data; union __anonunion_ldv_24454_178 ldv_24454; } ; 356 struct audit_context ; 27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ; 78 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ; 125 struct futex_pi_state ; 126 struct robust_list_head ; 127 struct bio_list ; 128 struct fs_struct ; 129 struct perf_event_context ; 130 struct blk_plug ; 180 struct cfs_rq ; 181 struct task_group ; 426 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ; 465 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ; 473 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ; 480 struct cputime { cputime_t utime; cputime_t stime; } ; 492 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ; 512 struct thread_group_cputimer { struct task_cputime cputime; int running; raw_spinlock_t lock; } ; 554 struct autogroup ; 555 struct tty_struct ; 555 struct taskstats ; 555 struct tty_audit_buf ; 555 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid 
*leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; unsigned int audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; struct rw_semaphore group_rwsem; oom_flags_t oom_flags; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ; 735 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ; 778 struct backing_dev_info ; 779 struct reclaim_state ; 780 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ; 794 struct task_delay_info { spinlock_t lock; unsigned int flags; struct timespec blkio_start; struct timespec blkio_end; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; struct timespec freepages_start; struct timespec freepages_end; u64 freepages_delay; u32 freepages_count; } ; 1026 struct io_context ; 1060 struct uts_namespace ; 1061 struct load_weight { unsigned long weight; u32 inv_weight; } ; 1069 struct sched_avg { u32 runnable_avg_sum; u32 runnable_avg_period; u64 last_runnable_update; s64 decay_count; unsigned long load_avg_contrib; } ; 1081 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ; 1116 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ; 1148 struct rt_rq ; 1148 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ; 1164 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 
dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_new; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ; 1222 struct memcg_batch_info { int do_batch; struct mem_cgroup *memcg; unsigned long nr_pages; unsigned long memsw_nr_pages; } ; 1643 struct memcg_oom_info { struct mem_cgroup *memcg; gfp_t gfp_mask; int order; unsigned char may_oom; } ; 1650 struct sched_class ; 1650 struct files_struct ; 1650 struct css_set ; 1650 struct compat_robust_list_head ; 1650 struct numa_group ; 1650 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; struct task_struct *last_wakee; unsigned long wakee_flips; unsigned long wakee_flip_decay_ts; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; cpumask_t cpus_allowed; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; unsigned char brk_randomized; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned int jobctl; unsigned int personality; unsigned char in_execve; unsigned char in_iowait; unsigned char no_new_privs; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; struct timespec start_time; struct timespec real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; int link_count; int total_link_count; struct sysv_sem sysvsem; unsigned long last_switch_count; struct thread_struct thread; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; int (*notifier)(void *); void *notifier_data; sigset_t *notifier_mask; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct task_struct *pi_top_task; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int 
hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults_memory; unsigned long total_numa_faults; unsigned long *numa_faults_buffer_memory; unsigned long *numa_faults_cpu; unsigned long *numa_faults_buffer_cpu; unsigned long numa_faults_locality[2U]; unsigned long numa_pages_migrated; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; unsigned long trace; unsigned long trace_recursion; struct memcg_batch_info memcg_batch; unsigned int memcg_kmem_skip_account; struct memcg_oom_info memcg_oom; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; } ; 18 struct nf_conntrack { atomic_t use; } ; 137 struct nf_bridge_info { atomic_t use; unsigned int mask; struct net_device *physindev; struct net_device *physoutdev; unsigned long data[4U]; } ; 147 struct sk_buff_head { struct sk_buff *next; struct sk_buff *prev; __u32 qlen; spinlock_t lock; } ; 360 typedef unsigned int sk_buff_data_t; 361 struct __anonstruct_ldv_26193_184 { u32 stamp_us; u32 stamp_jiffies; } ; 361 union __anonunion_ldv_26194_183 { u64 v64; struct __anonstruct_ldv_26193_184 ldv_26193; } ; 361 struct skb_mstamp { union __anonunion_ldv_26194_183 ldv_26194; } ; 414 union __anonunion_ldv_26213_185 { ktime_t tstamp; struct skb_mstamp skb_mstamp; } ; 414 struct sec_path ; 414 struct __anonstruct_ldv_26229_187 { __u16 csum_start; __u16 csum_offset; } ; 414 union __anonunion_ldv_26230_186 { __wsum csum; struct __anonstruct_ldv_26229_187 ldv_26229; } ; 414 union __anonunion_ldv_26269_188 { unsigned int napi_id; dma_cookie_t dma_cookie; } ; 414 union __anonunion_ldv_26275_189 { __u32 mark; __u32 dropcount; __u32 reserved_tailroom; } ; 414 struct sk_buff { struct sk_buff *next; struct sk_buff *prev; union __anonunion_ldv_26213_185 ldv_26213; struct sock *sk; struct net_device *dev; 
char cb[48U]; unsigned long _skb_refdst; struct sec_path *sp; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; union __anonunion_ldv_26230_186 ldv_26230; __u32 priority; unsigned char ignore_df; unsigned char cloned; unsigned char ip_summed; unsigned char nohdr; unsigned char nfctinfo; unsigned char pkt_type; unsigned char fclone; unsigned char ipvs_property; unsigned char peeked; unsigned char nf_trace; __be16 protocol; void (*destructor)(struct sk_buff *); struct nf_conntrack *nfct; struct nf_bridge_info *nf_bridge; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; __u16 tc_index; __u16 tc_verd; __u16 queue_mapping; unsigned char ndisc_nodetype; unsigned char pfmemalloc; unsigned char ooo_okay; unsigned char l4_hash; unsigned char wifi_acked_valid; unsigned char wifi_acked; unsigned char no_fcs; unsigned char head_frag; unsigned char encapsulation; unsigned char encap_hdr_csum; unsigned char csum_valid; unsigned char csum_complete_sw; union __anonunion_ldv_26269_188 ldv_26269; __u32 secmark; union __anonunion_ldv_26275_189 ldv_26275; __be16 inner_protocol; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __u16 transport_header; __u16 network_header; __u16 mac_header; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; atomic_t users; } ; 641 struct dst_entry ; 84 struct pm_qos_request { struct plist_node node; int pm_qos_class; struct delayed_work work; } ; 48 struct pm_qos_flags_request { struct list_head node; s32 flags; } ; 53 enum dev_pm_qos_req_type { DEV_PM_QOS_RESUME_LATENCY = 1, DEV_PM_QOS_LATENCY_TOLERANCE = 2, DEV_PM_QOS_FLAGS = 3 } ; 59 union __anonunion_data_190 { struct plist_node pnode; struct pm_qos_flags_request flr; } ; 59 struct dev_pm_qos_request { enum dev_pm_qos_req_type type; union __anonunion_data_190 data; struct device *dev; } ; 68 enum pm_qos_type { PM_QOS_UNITIALIZED = 0, PM_QOS_MAX = 1, PM_QOS_MIN = 2 } ; 74 struct pm_qos_constraints { struct plist_head list; s32 target_value; s32 default_value; s32 no_constraint_value; enum pm_qos_type type; struct blocking_notifier_head *notifiers; } ; 88 struct pm_qos_flags { struct list_head list; s32 effective_flags; } ; 93 struct dev_pm_qos { struct pm_qos_constraints resume_latency; struct pm_qos_constraints latency_tolerance; struct pm_qos_flags flags; struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *latency_tolerance_req; struct dev_pm_qos_request *flags_req; } ; 54 struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; } ; 43 struct __anonstruct_sync_serial_settings_191 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } ; 43 typedef struct __anonstruct_sync_serial_settings_191 sync_serial_settings; 50 struct __anonstruct_te1_settings_192 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; } ; 50 typedef struct __anonstruct_te1_settings_192 te1_settings; 55 struct __anonstruct_raw_hdlc_proto_193 { unsigned short encoding; unsigned short parity; } ; 55 typedef struct __anonstruct_raw_hdlc_proto_193 raw_hdlc_proto; 65 struct __anonstruct_fr_proto_194 { unsigned int t391; unsigned int t392; unsigned int 
n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } ; 65 typedef struct __anonstruct_fr_proto_194 fr_proto; 69 struct __anonstruct_fr_proto_pvc_195 { unsigned int dlci; } ; 69 typedef struct __anonstruct_fr_proto_pvc_195 fr_proto_pvc; 74 struct __anonstruct_fr_proto_pvc_info_196 { unsigned int dlci; char master[16U]; } ; 74 typedef struct __anonstruct_fr_proto_pvc_info_196 fr_proto_pvc_info; 79 struct __anonstruct_cisco_proto_197 { unsigned int interval; unsigned int timeout; } ; 79 typedef struct __anonstruct_cisco_proto_197 cisco_proto; 117 struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; } ; 177 union __anonunion_ifs_ifsu_198 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; } ; 177 struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_198 ifs_ifsu; } ; 195 union __anonunion_ifr_ifrn_199 { char ifrn_name[16U]; } ; 195 union __anonunion_ifr_ifru_200 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; } ; 195 struct ifreq { union __anonunion_ifr_ifrn_199 ifr_ifrn; union __anonunion_ifr_ifru_200 ifr_ifru; } ; 91 struct hlist_bl_node ; 91 struct hlist_bl_head { struct hlist_bl_node *first; } ; 36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ; 114 struct __anonstruct_ldv_27896_203 { spinlock_t lock; unsigned int count; } ; 114 union __anonunion_ldv_27897_202 { struct __anonstruct_ldv_27896_203 ldv_27896; } ; 114 struct lockref { union __anonunion_ldv_27897_202 ldv_27897; } ; 49 struct nameidata ; 50 struct vfsmount ; 51 struct __anonstruct_ldv_27920_205 { u32 hash; u32 len; } ; 51 union __anonunion_ldv_27922_204 { struct __anonstruct_ldv_27920_205 ldv_27920; u64 hash_len; } ; 51 struct qstr { union __anonunion_ldv_27922_204 ldv_27922; const unsigned char *name; } ; 90 struct dentry_operations ; 90 union __anonunion_d_u_206 { struct list_head d_child; struct callback_head d_rcu; } ; 90 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; struct list_head d_lru; union __anonunion_d_u_206 d_u; struct list_head d_subdirs; struct hlist_node d_alias; } ; 142 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); } ; 477 struct path { struct vfsmount *mnt; struct dentry *dentry; } ; 27 struct list_lru_node { spinlock_t lock; struct list_head 
list; long nr_items; } ; 30 struct list_lru { struct list_lru_node *node; nodemask_t active_nodes; } ; 58 struct __anonstruct_ldv_28283_208 { struct radix_tree_node *parent; void *private_data; } ; 58 union __anonunion_ldv_28285_207 { struct __anonstruct_ldv_28283_208 ldv_28283; struct callback_head callback_head; } ; 58 struct radix_tree_node { unsigned int path; unsigned int count; union __anonunion_ldv_28285_207 ldv_28285; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ; 105 struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ; 38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; 30 struct block_device ; 31 struct cgroup_subsys_state ; 19 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ; 59 struct export_operations ; 61 struct kstatfs ; 62 struct swap_info_struct ; 69 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ; 253 struct fs_disk_quota { __s8 d_version; __s8 d_flags; __u16 d_fieldmask; __u32 d_id; __u64 d_blk_hardlimit; __u64 d_blk_softlimit; __u64 d_ino_hardlimit; __u64 d_ino_softlimit; __u64 d_bcount; __u64 d_icount; __s32 d_itimer; __s32 d_btimer; __u16 d_iwarns; __u16 d_bwarns; __s32 d_padding2; __u64 d_rtb_hardlimit; __u64 d_rtb_softlimit; __u64 d_rtbcount; __s32 d_rtbtimer; __u16 d_rtbwarns; __s16 d_padding3; char d_padding4[8U]; } ; 76 struct fs_qfilestat { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; } ; 151 typedef struct fs_qfilestat fs_qfilestat_t; 152 struct fs_quota_stat { __s8 qs_version; __u16 qs_flags; __s8 qs_pad; fs_qfilestat_t qs_uquota; fs_qfilestat_t qs_gquota; __u32 qs_incoredqs; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; } ; 166 struct fs_qfilestatv { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; __u32 qfs_pad; } ; 196 struct fs_quota_statv { __s8 qs_version; __u8 qs_pad1; __u16 qs_flags; __u32 qs_incoredqs; struct fs_qfilestatv qs_uquota; struct fs_qfilestatv qs_gquota; struct fs_qfilestatv qs_pquota; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; __u64 qs_pad2[8U]; } ; 212 struct dquot ; 19 typedef __kernel_uid32_t projid_t; 23 struct __anonstruct_kprojid_t_209 { projid_t val; } ; 23 typedef struct __anonstruct_kprojid_t_209 kprojid_t; 119 struct if_dqinfo { __u64 dqi_bgrace; __u64 dqi_igrace; __u32 dqi_flags; __u32 dqi_valid; } ; 152 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; 60 typedef long long qsize_t; 61 union __anonunion_ldv_28810_210 { kuid_t uid; kgid_t gid; kprojid_t projid; } ; 61 struct kqid { union __anonunion_ldv_28810_210 ldv_28810; enum quota_type type; } ; 178 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time_t dqb_btime; time_t dqb_itime; } ; 200 struct quota_format_type ; 201 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_maxblimit; qsize_t dqi_maxilimit; void 
*dqi_priv; } ; 264 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ; 291 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); } ; 302 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); } ; 316 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_on_meta)(struct super_block *, int, int); int (*quota_off)(struct super_block *, int); int (*quota_sync)(struct super_block *, int); int (*get_info)(struct super_block *, int, struct if_dqinfo *); int (*set_info)(struct super_block *, int, struct if_dqinfo *); int (*get_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*set_dqblk)(struct super_block *, struct kqid , struct fs_disk_quota *); int (*get_xstate)(struct super_block *, struct fs_quota_stat *); int (*set_xstate)(struct super_block *, unsigned int, int); int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); int (*rm_xquota)(struct super_block *, unsigned int); } ; 334 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ; 380 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct rw_semaphore dqptr_sem; struct inode *files[2U]; struct mem_dqinfo info[2U]; const struct quota_format_ops *ops[2U]; } ; 323 struct address_space_operations { int (*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *, loff_t ); int (*get_xip_mem)(struct address_space *, unsigned long, int, void **, unsigned long *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void 
(*swap_deactivate)(struct file *); } ; 382 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; unsigned int i_mmap_writable; struct rb_root i_mmap; struct list_head i_mmap_nonlinear; struct mutex i_mmap_mutex; unsigned long nrpages; unsigned long nrshadows; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; struct backing_dev_info *backing_dev_info; spinlock_t private_lock; struct list_head private_list; void *private_data; } ; 405 struct request_queue ; 406 struct hd_struct ; 406 struct gendisk ; 406 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; struct list_head bd_inodes; void *bd_claiming; void *bd_holder; int bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ; 478 struct posix_acl ; 479 struct inode_operations ; 479 union __anonunion_ldv_29224_213 { const unsigned int i_nlink; unsigned int __i_nlink; } ; 479 union __anonunion_ldv_29244_214 { struct hlist_head i_dentry; struct callback_head i_rcu; } ; 479 struct file_lock ; 479 struct cdev ; 479 union __anonunion_ldv_29261_215 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; } ; 479 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion_ldv_29224_213 ldv_29224; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct mutex i_mutex; unsigned long dirtied_when; struct hlist_node i_hash; struct list_head i_wb_list; struct list_head i_lru; struct list_head i_sb_list; union __anonunion_ldv_29244_214 ldv_29244; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock *i_flock; struct address_space i_data; struct dquot *i_dquot[2U]; struct list_head i_devices; union __anonunion_ldv_29261_215 ldv_29261; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; void *i_private; } ; 715 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ; 723 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ; 746 union __anonunion_f_u_216 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ; 746 struct file { union __anonunion_f_u_216 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; 
struct address_space *f_mapping; } ; 836 typedef struct files_struct *fl_owner_t; 837 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ; 842 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, struct file_lock *, int); void (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock **, int); } ; 860 struct nlm_lockowner ; 861 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ; 14 struct nfs4_lock_state ; 15 struct nfs4_lock_info { struct nfs4_lock_state *owner; } ; 19 struct __anonstruct_afs_218 { struct list_head link; int state; } ; 19 union __anonunion_fl_u_217 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_218 afs; } ; 19 struct file_lock { struct file_lock *fl_next; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_217 fl_u; } ; 963 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ; 1157 struct sb_writers { struct percpu_counter counter[3U]; wait_queue_head_t wait; int frozen; wait_queue_head_t wait_unfrozen; struct lockdep_map lock_map[3U]; } ; 1173 struct super_operations ; 1173 struct xattr_handler ; 1173 struct mtd_info ; 1173 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; struct list_head s_inodes; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; } ; 1403 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ; 1441 struct dir_context { int (*actor)(void *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ; 1446 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, 
size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*aio_read)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); ssize_t (*aio_write)(struct kiocb *, const struct iovec *, unsigned long, loff_t ); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file *, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); int (*show_fdinfo)(struct seq_file *, struct file *); } ; 1488 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); void * (*follow_link)(struct dentry *, struct nameidata *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); void (*put_link)(struct dentry *, struct nameidata *, void *); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ; 1535 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int 
(*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_fs)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, int); long int (*free_cached_objects)(struct super_block *, long, int); } ; 1749 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ; 39 typedef s32 compat_long_t; 44 typedef u32 compat_uptr_t; 276 struct compat_robust_list { compat_uptr_t next; } ; 280 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ; 703 struct ethhdr { unsigned char h_dest[6U]; unsigned char h_source[6U]; __be16 h_proto; } ; 34 struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; __u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2U]; } ; 125 struct ethtool_drvinfo { __u32 cmd; char driver[32U]; char version[32U]; char fw_version[32U]; char bus_info[32U]; char reserved1[32U]; char reserved2[12U]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; } ; 187 struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6U]; } ; 211 struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0U]; } ; 233 struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0U]; } ; 259 struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2U]; } ; 288 struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8U]; } ; 305 struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 
rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; } ; 404 struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; } ; 441 struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; } ; 469 struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; } ; 568 struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0U]; } ; 600 struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0U]; } ; 642 struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; } ; 675 struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; } ; 691 struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; } ; 711 union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethhdr ether_spec; __u8 hdata[52U]; } ; 722 struct ethtool_flow_ext { __u8 padding[2U]; unsigned char h_dest[6U]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2U]; } ; 741 struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; } ; 767 struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0U]; } ; 933 struct ethtool_flash { __u32 cmd; __u32 region; char data[128U]; } ; 941 struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0U]; } ; 1017 struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3U]; __u32 rx_filters; __u32 rx_reserved[3U]; } ; 44 enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ; 79 struct ethtool_ops { int (*get_settings)(struct net_device *, struct ethtool_cmd *); int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32 ); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); 
int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 , u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state ); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32 ); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); } ; 235 struct prot_inuse ; 236 struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; struct prot_inuse *inuse; } ; 38 struct u64_stats_sync { } ; 145 struct ipstats_mib { u64 mibs[36U]; struct u64_stats_sync syncp; } ; 61 struct icmp_mib { unsigned long mibs[28U]; } ; 67 struct icmpmsg_mib { atomic_long_t mibs[512U]; } ; 72 struct icmpv6_mib { unsigned long mibs[6U]; } ; 83 struct icmpv6msg_mib { atomic_long_t mibs[512U]; } ; 93 struct tcp_mib { unsigned long mibs[16U]; } ; 100 struct udp_mib { unsigned long mibs[8U]; } ; 106 struct linux_mib { unsigned long mibs[103U]; } ; 112 struct linux_xfrm_mib { unsigned long mibs[29U]; } ; 118 struct proc_dir_entry ; 118 struct netns_mib { struct tcp_mib *tcp_statistics; struct ipstats_mib *ip_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udplite_statistics; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct proc_dir_entry *proc_net_devsnmp6; struct udp_mib *udp_stats_in6; struct udp_mib *udplite_stats_in6; struct ipstats_mib *ipv6_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct linux_xfrm_mib *xfrm_statistics; } ; 26 struct netns_unix { int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; } ; 12 struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; } ; 14 struct netns_frags { int nqueues; struct list_head lru_list; spinlock_t lru_lock; struct percpu_counter mem; int timeout; int high_thresh; int low_thresh; } ; 180 struct tcpm_hash_bucket ; 181 struct ipv4_devconf ; 182 struct fib_rules_ops ; 183 struct fib_table ; 184 struct local_ports { seqlock_t lock; int range[2U]; } ; 22 struct ping_group_range { seqlock_t lock; kgid_t range[2U]; } ; 27 struct inet_peer_base ; 27 struct xt_table ; 27 struct netns_ipv4 { struct 
ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; struct fib_table *fib_local; struct fib_table *fib_main; struct fib_table *fib_default; int fib_num_tclassid_users; struct hlist_head *fib_table_hash; struct sock *fibnl; struct sock **icmp_sk; struct inet_peer_base *peers; struct tcpm_hash_bucket *tcp_metrics_hash; unsigned int tcp_metrics_hash_log; struct netns_frags frags; struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; struct xt_table *iptable_security; struct xt_table *nat_table; int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; struct ping_group_range ping_group_range; atomic_t dev_addr_genid; unsigned long *sysctl_local_reserved_ports; struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; atomic_t rt_genid; } ; 102 struct neighbour ; 102 struct dst_ops { unsigned short family; __be16 protocol; unsigned int gc_thresh; int (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32 ); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *, int); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 ); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; } ; 73 struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; int flowlabel_consistency; int icmpv6_time; int anycast_src_echo_reply; int fwmark_reflect; } ; 35 struct ipv6_devconf ; 35 struct rt6_info ; 35 struct rt6_statistics ; 35 struct fib6_table ; 35 struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct netns_frags frags; struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; struct xt_table *ip6table_security; struct xt_table *ip6table_nat; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct dst_ops ip6_dst_ops; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; struct rt6_info *ip6_prohibit_entry; 
struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock **icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t rt_genid; } ; 80 struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; } ; 86 struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; } ; 14 struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; u16 max_dsize; } ; 21 struct sctp_mib ; 22 struct netns_sctp { struct sctp_mib *sctp_statistics; struct proc_dir_entry *proc_net_sctp; struct ctl_table_header *sysctl_header; struct sock *ctl_sock; struct list_head local_addr_list; struct list_head addr_waitq; struct timer_list addr_wq_timer; struct list_head auto_asconf_splist; spinlock_t addr_wq_lock; spinlock_t local_addr_lock; unsigned int rto_initial; unsigned int rto_min; unsigned int rto_max; int rto_alpha; int rto_beta; int max_burst; int cookie_preserve_enable; char *sctp_hmac_alg; unsigned int valid_cookie_life; unsigned int sack_timeout; unsigned int hb_interval; int max_retrans_association; int max_retrans_path; int max_retrans_init; int pf_retrans; int sndbuf_policy; int rcvbuf_policy; int default_auto_asconf; int addip_enable; int addip_noauth; int prsctp_enable; int auth_enable; int scope_policy; int rwnd_upd_shift; unsigned long max_autoclose; } ; 133 struct netns_dccp { struct sock *v4_ctl_sk; struct sock *v6_ctl_sk; } ; 324 struct nlattr ; 337 struct nf_logger ; 338 struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_logger *nf_loggers[13U]; struct ctl_table_header *nf_log_dir_header; } ; 17 struct ebt_table ; 18 struct netns_xt { struct list_head tables[13U]; bool notrack_deprecated_warning; struct ebt_table *broute_table; struct ebt_table *frame_filter; struct ebt_table *frame_nat; bool ulog_warn_deprecated; bool ebt_ulog_warn_deprecated; } ; 24 struct hlist_nulls_node ; 24 struct hlist_nulls_head { struct hlist_nulls_node *first; } ; 20 struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; } ; 32 struct nf_proto_net { struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; struct ctl_table_header *ctl_compat_header; struct ctl_table *ctl_compat_table; unsigned int users; } ; 24 struct nf_generic_net { struct nf_proto_net pn; unsigned int timeout; } ; 29 struct nf_tcp_net { struct nf_proto_net pn; unsigned int timeouts[14U]; unsigned int tcp_loose; unsigned int tcp_be_liberal; unsigned int tcp_max_retrans; } ; 43 struct nf_udp_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ; 48 struct nf_icmp_net { struct nf_proto_net pn; unsigned int timeout; } ; 53 struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; } ; 64 struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; struct hlist_nulls_head tmpl; } ; 72 struct ip_conntrack_stat ; 72 struct nf_ct_event_notifier ; 72 struct nf_exp_event_notifier ; 72 struct netns_ct { atomic_t count; unsigned int expect_count; struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; struct ctl_table_header 
*helper_sysctl_header; char *slabname; unsigned int sysctl_log_invalid; unsigned int sysctl_events_retry_timeout; int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; unsigned int htable_size; seqcount_t generation; struct kmem_cache *nf_conntrack_cachep; struct hlist_nulls_head *hash; struct hlist_head *expect_hash; struct ct_pcpu *pcpu_lists; struct ip_conntrack_stat *stat; struct nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_exp_event_notifier *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; unsigned int labels_used; u8 label_words; struct hlist_head *nat_bysource; unsigned int nat_htable_size; } ; 111 struct nft_af_info ; 112 struct netns_nftables { struct list_head af_info; struct list_head commit_list; struct nft_af_info *ipv4; struct nft_af_info *ipv6; struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; u8 gencursor; u8 genctr; } ; 499 enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; 16 typedef enum irqreturn irqreturn_t; 450 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ; 663 struct flow_cache_percpu { struct hlist_head *hash_table; int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; } ; 16 struct flow_cache { u32 hash_shift; struct flow_cache_percpu *percpu; struct notifier_block hotcpu_notifier; int low_watermark; int high_watermark; struct timer_list rnd_timer; } ; 25 struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; } ; 17 struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct hlist_head state_gc_list; struct work_struct state_gc_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; struct hlist_head policy_inexact[6U]; struct xfrm_policy_hash policy_bydst[6U]; unsigned int policy_count[6U]; struct work_struct policy_hash_work; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; struct ctl_table_header *sysctl_hdr; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; rwlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; struct mutex flow_flush_sem; } ; 74 struct net_generic ; 75 struct netns_ipvs ; 76 struct net { atomic_t passive; atomic_t count; spinlock_t rules_mod_lock; struct list_head list; struct list_head cleanup_list; struct list_head exit_list; struct user_namespace *user_ns; unsigned int proc_inum; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; int ifindex; unsigned int dev_unreg_count; struct list_head rules_ops; struct net_device *loopback_dev; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct 
netns_ieee802154_lowpan ieee802154_lowpan; struct netns_sctp sctp; struct netns_dccp dccp; struct netns_nf nf; struct netns_xt xt; struct netns_ct ct; struct netns_nftables nft; struct netns_nf_frag nf_frag; struct sock *nfnl; struct sock *nfnl_stash; struct sk_buff_head wext_nlevents; struct net_generic *gen; struct netns_xfrm xfrm; struct netns_ipvs *ipvs; struct sock *diag_nlsk; atomic_t fnhe_genid; } ; 400 struct dsa_chip_data { struct device *mii_bus; int sw_addr; char *port_names[12U]; s8 *rtable; } ; 46 struct dsa_platform_data { struct device *netdev; int nr_chips; struct dsa_chip_data *chip; } ; 61 struct dsa_switch ; 61 struct dsa_switch_tree { struct dsa_platform_data *pd; struct net_device *master_netdev; __be16 tag_protocol; s8 cpu_switch; s8 cpu_port; int link_poll_needed; struct work_struct link_poll_work; struct timer_list link_poll_timer; struct dsa_switch *ds[4U]; } ; 94 struct dsa_switch_driver ; 94 struct mii_bus ; 94 struct dsa_switch { struct dsa_switch_tree *dst; int index; struct dsa_chip_data *pd; struct dsa_switch_driver *drv; struct mii_bus *master_mii_bus; u32 dsa_port_mask; u32 phys_port_mask; struct mii_bus *slave_mii_bus; struct net_device *ports[12U]; } ; 146 struct dsa_switch_driver { struct list_head list; __be16 tag_protocol; int priv_size; char * (*probe)(struct mii_bus *, int); int (*setup)(struct dsa_switch *); int (*set_addr)(struct dsa_switch *, u8 *); int (*phy_read)(struct dsa_switch *, int, int); int (*phy_write)(struct dsa_switch *, int, int, u16 ); void (*poll_link)(struct dsa_switch *); void (*get_strings)(struct dsa_switch *, int, uint8_t *); void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); int (*get_sset_count)(struct dsa_switch *); } ; 205 struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8U]; __u8 tc_rx_bw[8U]; __u8 tc_tsa[8U]; __u8 prio_tc[8U]; __u8 tc_reco_bw[8U]; __u8 tc_reco_tsa[8U]; __u8 reco_prio_tc[8U]; } ; 69 struct ieee_maxrate { __u64 tc_maxrate[8U]; } ; 80 struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8U]; __u64 indications[8U]; } ; 100 struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 tcs_supported; __u8 pg_bw[8U]; __u8 prio_pg[8U]; } ; 123 struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; } ; 138 struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; } ; 167 struct dcb_peer_app_info { __u8 willing; __u8 error; } ; 40 struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, struct ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int (*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8 ); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgtx)(struct net_device *, int, u8 ); void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgrx)(struct net_device *, int, u8 ); void (*getpgtccfgtx)(struct net_device 
*, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8 ); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int (*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8 ); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8 ); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32 ); void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct net_device *, int, u8 ); u8 (*setapp)(struct net_device *, u8 , u16 , u8 ); u8 (*getapp)(struct net_device *, u8 , u16 ); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8 ); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8 ); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ; 102 struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32U]; __u8 ac_sched; __u8 ac_pad[3U]; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; } ; 58 struct percpu_ref ; 54 typedef void percpu_ref_func_t(struct percpu_ref *); 55 struct percpu_ref { atomic_t count; unsigned int *pcpu_count; percpu_ref_func_t *release; percpu_ref_func_t *confirm_kill; struct callback_head rcu; } ; 205 struct cgroup_root ; 206 struct cgroup_subsys ; 207 struct cgroup ; 58 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; struct callback_head callback_head; struct work_struct destroy_work; } ; 167 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int populated_cnt; struct kernfs_node *kn; struct kernfs_node *populated_kn; unsigned int child_subsys_mask; struct cgroup_subsys_state *subsys[12U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[12U]; struct list_head release_list; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; } ; 253 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ; 355 struct css_set { atomic_t refcount; struct hlist_node 
hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[12U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[12U]; struct callback_head callback_head; } ; 438 struct cftype { char name[64U]; int private; umode_t mode; size_t max_write_len; unsigned int flags; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ; 609 struct cgroup_taskset ; 617 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*attach)(struct cgroup_subsys_state *, struct cgroup_taskset *); void (*fork)(struct task_struct *); void (*exit)(struct cgroup_subsys_state *, struct cgroup_subsys_state *, struct task_struct *); void (*bind)(struct cgroup_subsys_state *); int disabled; int early_init; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *base_cftypes; } ; 919 struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[]; } ; 3161 struct mnt_namespace ; 3162 struct ipc_namespace ; 3163 struct nsproxy { atomic_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; } ; 41 struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; } ; 145 struct nlattr { __u16 nla_len; __u16 nla_type; } ; 104 struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq; unsigned int seq; long args[6U]; } ; 180 struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; } ; 39 struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; } ; 547 struct ifla_vf_info { __u32 vf; __u8 mac[32U]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; 
__u32 min_tx_rate; __u32 max_tx_rate; } ; 28 struct netpoll_info ; 29 struct phy_device ; 30 struct wireless_dev ; 61 enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16, NETDEV_TX_LOCKED = 32 } ; 106 typedef enum netdev_tx netdev_tx_t; 125 struct net_device_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long rx_errors; unsigned long tx_errors; unsigned long rx_dropped; unsigned long tx_dropped; unsigned long multicast; unsigned long collisions; unsigned long rx_length_errors; unsigned long rx_over_errors; unsigned long rx_crc_errors; unsigned long rx_frame_errors; unsigned long rx_fifo_errors; unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; } ; 186 struct neigh_parms ; 207 struct netdev_hw_addr_list { struct list_head list; int count; } ; 212 struct hh_cache { u16 hh_len; u16 __pad; seqlock_t hh_lock; unsigned long hh_data[16U]; } ; 241 struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*rebuild)(struct sk_buff *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16 ); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); } ; 292 struct napi_struct { struct list_head poll_list; unsigned long state; int weight; unsigned int gro_count; int (*poll)(struct napi_struct *, int); spinlock_t poll_lock; int poll_owner; struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; } ; 336 enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ; 384 typedef enum rx_handler_result rx_handler_result_t; 385 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); 522 struct Qdisc ; 522 struct netdev_queue { struct net_device *dev; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; int numa_node; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long trans_timeout; unsigned long state; struct dql dql; } ; 591 struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0U]; } ; 603 struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; } ; 615 struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0U]; } ; 666 struct netdev_rx_queue { struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; } ; 689 struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0U]; } ; 702 struct xps_dev_maps { struct callback_head rcu; struct xps_map *cpu_map[0U]; } ; 713 struct netdev_tc_txq { u16 count; u16 offset; } ; 724 struct netdev_fcoe_hbainfo { char manufacturer[64U]; char serial_number[64U]; char hardware_version[64U]; char driver_version[64U]; char optionrom_version[64U]; char firmware_version[64U]; char model[256U]; char model_description[256U]; } ; 740 struct netdev_phys_port_id { unsigned char id[32U]; unsigned char id_len; } ; 753 struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int 
(*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16 (*)(struct net_device *, struct sk_buff *)); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *); struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 ); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 ); void (*ndo_poll_controller)(struct net_device *); int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); void (*ndo_netpoll_cleanup)(struct net_device *); int (*ndo_busy_poll)(struct napi_struct *); int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 ); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool ); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_setup_tc)(struct net_device *, u8 ); int (*ndo_fcoe_enable)(struct net_device *); int (*ndo_fcoe_disable)(struct net_device *); int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_ddp_done)(struct net_device *, u16 ); int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 ); int (*ndo_add_slave)(struct net_device *, struct net_device *); int (*ndo_del_slave)(struct net_device *, struct net_device *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t ); int (*ndo_set_features)(struct net_device *, netdev_features_t ); int (*ndo_neigh_construct)(struct neighbour *); void (*ndo_neigh_destroy)(struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 ); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, int); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *); int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 ); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *); int (*ndo_change_carrier)(struct net_device *, bool ); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_port_id *); void (*ndo_add_vxlan_port)(struct net_device *, sa_family_t , __be16 ); void (*ndo_del_vxlan_port)(struct net_device *, sa_family_t , __be16 ); void * (*ndo_dfwd_add_station)(struct 
net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); int (*ndo_get_lock_subclass)(struct net_device *); } ; 1187 struct __anonstruct_adj_list_246 { struct list_head upper; struct list_head lower; } ; 1187 struct __anonstruct_all_adj_list_247 { struct list_head upper; struct list_head lower; } ; 1187 struct iw_handler_def ; 1187 struct iw_public_data ; 1187 struct forwarding_accel_ops ; 1187 struct vlan_info ; 1187 struct tipc_bearer ; 1187 struct in_device ; 1187 struct dn_dev ; 1187 struct inet6_dev ; 1187 struct cpu_rmap ; 1187 struct pcpu_lstats ; 1187 struct pcpu_sw_netstats ; 1187 struct pcpu_dstats ; 1187 struct pcpu_vstats ; 1187 union __anonunion_ldv_40376_248 { void *ml_priv; struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; struct pcpu_vstats *vstats; } ; 1187 struct garp_port ; 1187 struct mrp_port ; 1187 struct rtnl_link_ops ; 1187 struct net_device { char name[16U]; struct hlist_node name_hlist; char *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct __anonstruct_adj_list_246 adj_list; struct __anonstruct_all_adj_list_247 all_adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; int ifindex; int iflink; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_t carrier_changes; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct forwarding_accel_ops *fwd_ops; const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned int mtu; unsigned short type; unsigned short hard_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; unsigned char perm_addr[32U]; unsigned char addr_assign_type; unsigned char addr_len; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; bool uc_promisc; unsigned int promiscuity; unsigned int allmulti; struct vlan_info *vlan_info; struct dsa_switch_tree *dsa_ptr; struct tipc_bearer *tipc_ptr; void *atalk_ptr; struct in_device *ip_ptr; struct dn_dev *dn_ptr; struct inet6_dev *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; unsigned long last_rx; unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; rx_handler_func_t *rx_handler; void *rx_handler_data; struct netdev_queue *ingress_queue; unsigned char broadcast[32U]; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; unsigned long tx_queue_len; spinlock_t tx_global_lock; struct xps_dev_maps *xps_maps; struct cpu_rmap *rx_cpu_rmap; unsigned long trans_start; int watchdog_timeo; struct timer_list watchdog_timer; int *pcpu_refcnt; struct list_head todo_list; struct hlist_node 
index_hlist; struct list_head link_watch_list; unsigned char reg_state; bool dismantle; unsigned short rtnl_link_state; void (*destructor)(struct net_device *); struct netpoll_info *npinfo; struct net *nd_net; union __anonunion_ldv_40376_248 ldv_40376; struct garp_port *garp_port; struct mrp_port *mrp_port; struct device dev; const struct attribute_group *sysfs_groups[4U]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; u16 gso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; u8 num_tc; struct netdev_tc_txq tc_to_txq[16U]; u8 prio_tc_map[16U]; unsigned int fcoe_ddp_xid; struct netprio_map *priomap; struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; int group; struct pm_qos_request pm_qos_req; } ; 1806 struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; } ; 483 struct tcmsg { unsigned char tcm_family; unsigned char tcm__pad1; unsigned short tcm__pad2; int tcm_ifindex; __u32 tcm_handle; __u32 tcm_parent; __u32 tcm_info; } ; 13 typedef unsigned long kernel_ulong_t; 186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; } ; 219 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 479 struct platform_device_id { char name[20U]; kernel_ulong_t driver_data; } ; 628 struct mfd_cell ; 629 struct platform_device { const char *name; int id; bool id_auto; struct device dev; u32 num_resources; struct resource *resource; const struct platform_device_id *id_entry; struct mfd_cell *mfd_cell; struct pdev_archdata archdata; } ; 172 struct platform_driver { int (*probe)(struct platform_device *); int (*remove)(struct platform_device *); void (*shutdown)(struct platform_device *); int (*suspend)(struct platform_device *, pm_message_t ); int (*resume)(struct platform_device *); struct device_driver driver; const struct platform_device_id *id_table; bool prevent_deferred_probe; } ; 6 typedef unsigned char cc_t; 7 typedef unsigned int speed_t; 8 typedef unsigned int tcflag_t; 30 struct ktermios { tcflag_t c_iflag; tcflag_t c_oflag; tcflag_t c_cflag; tcflag_t c_lflag; cc_t c_line; cc_t c_cc[19U]; speed_t c_ispeed; speed_t c_ospeed; } ; 41 struct winsize { unsigned short ws_row; unsigned short ws_col; unsigned short ws_xpixel; unsigned short ws_ypixel; } ; 93 struct termiox { __u16 x_hflag; __u16 x_cflag; __u16 x_rflag[5U]; __u16 x_sflag; } ; 16 struct cdev { struct kobject kobj; struct module *owner; const struct file_operations *ops; struct list_head list; dev_t dev; unsigned int count; } ; 34 struct tty_driver ; 35 struct serial_icounter_struct ; 36 struct tty_operations { struct tty_struct * (*lookup)(struct tty_driver *, struct inode *, int); int (*install)(struct tty_driver *, struct tty_struct *); void (*remove)(struct tty_driver *, struct tty_struct *); int (*open)(struct tty_struct *, struct file *); void (*close)(struct tty_struct *, struct file *); void (*shutdown)(struct tty_struct *); void (*cleanup)(struct tty_struct *); int (*write)(struct tty_struct *, const unsigned char *, int); int (*put_char)(struct tty_struct *, unsigned char); void (*flush_chars)(struct tty_struct *); int (*write_room)(struct tty_struct *); int (*chars_in_buffer)(struct tty_struct *); int (*ioctl)(struct tty_struct *, unsigned int, unsigned long); long int (*compat_ioctl)(struct tty_struct *, unsigned int, unsigned long); void (*set_termios)(struct tty_struct *, struct ktermios *); void (*throttle)(struct 
tty_struct *); void (*unthrottle)(struct tty_struct *); void (*stop)(struct tty_struct *); void (*start)(struct tty_struct *); void (*hangup)(struct tty_struct *); int (*break_ctl)(struct tty_struct *, int); void (*flush_buffer)(struct tty_struct *); void (*set_ldisc)(struct tty_struct *); void (*wait_until_sent)(struct tty_struct *, int); void (*send_xchar)(struct tty_struct *, char); int (*tiocmget)(struct tty_struct *); int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int); int (*resize)(struct tty_struct *, struct winsize *); int (*set_termiox)(struct tty_struct *, struct termiox *); int (*get_icount)(struct tty_struct *, struct serial_icounter_struct *); int (*poll_init)(struct tty_driver *, int, char *); int (*poll_get_char)(struct tty_driver *, int); void (*poll_put_char)(struct tty_driver *, int, char); const struct file_operations *proc_fops; } ; 289 struct tty_port ; 289 struct tty_driver { int magic; struct kref kref; struct cdev *cdevs; struct module *owner; const char *driver_name; const char *name; int name_base; int major; int minor_start; unsigned int num; short type; short subtype; struct ktermios init_termios; unsigned long flags; struct proc_dir_entry *proc_entry; struct tty_driver *other; struct tty_struct **ttys; struct tty_port **ports; struct ktermios **termios; void *driver_state; const struct tty_operations *ops; struct list_head tty_drivers; } ; 356 struct ld_semaphore { long count; raw_spinlock_t wait_lock; unsigned int wait_readers; struct list_head read_wait; struct list_head write_wait; struct lockdep_map dep_map; } ; 170 struct tty_ldisc_ops { int magic; char *name; int num; int flags; int (*open)(struct tty_struct *); void (*close)(struct tty_struct *); void (*flush_buffer)(struct tty_struct *); ssize_t (*chars_in_buffer)(struct tty_struct *); ssize_t (*read)(struct tty_struct *, struct file *, unsigned char *, size_t ); ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t ); int (*ioctl)(struct tty_struct *, struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct tty_struct *, struct file *, unsigned int, unsigned long); void (*set_termios)(struct tty_struct *, struct ktermios *); unsigned int (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); int (*hangup)(struct tty_struct *); void (*receive_buf)(struct tty_struct *, const unsigned char *, char *, int); void (*write_wakeup)(struct tty_struct *); void (*dcd_change)(struct tty_struct *, unsigned int); void (*fasync)(struct tty_struct *, int); int (*receive_buf2)(struct tty_struct *, const unsigned char *, char *, int); struct module *owner; int refcount; } ; 220 struct tty_ldisc { struct tty_ldisc_ops *ops; struct tty_struct *tty; } ; 225 union __anonunion_ldv_42946_255 { struct tty_buffer *next; struct llist_node free; } ; 225 struct tty_buffer { union __anonunion_ldv_42946_255 ldv_42946; int used; int size; int commit; int read; int flags; unsigned long data[0U]; } ; 59 struct tty_bufhead { struct tty_buffer *head; struct work_struct work; struct mutex lock; atomic_t priority; struct tty_buffer sentinel; struct llist_head free; atomic_t mem_used; int mem_limit; struct tty_buffer *tail; } ; 71 struct tty_port_operations { int (*carrier_raised)(struct tty_port *); void (*dtr_rts)(struct tty_port *, int); void (*shutdown)(struct tty_port *); int (*activate)(struct tty_port *, struct tty_struct *); void (*destruct)(struct tty_port *); } ; 197 struct tty_port { struct tty_bufhead buf; struct tty_struct *tty; struct tty_struct 
*itty; const struct tty_port_operations *ops; spinlock_t lock; int blocked_open; int count; wait_queue_head_t open_wait; wait_queue_head_t close_wait; wait_queue_head_t delta_msr_wait; unsigned long flags; unsigned char console; unsigned char low_latency; struct mutex mutex; struct mutex buf_mutex; unsigned char *xmit_buf; unsigned int close_delay; unsigned int closing_wait; int drain_delay; struct kref kref; } ; 222 struct tty_struct { int magic; struct kref kref; struct device *dev; struct tty_driver *driver; const struct tty_operations *ops; int index; struct ld_semaphore ldisc_sem; struct tty_ldisc *ldisc; struct mutex atomic_write_lock; struct mutex legacy_mutex; struct mutex throttle_mutex; struct rw_semaphore termios_rwsem; struct mutex winsize_mutex; spinlock_t ctrl_lock; struct ktermios termios; struct ktermios termios_locked; struct termiox *termiox; char name[64U]; struct pid *pgrp; struct pid *session; unsigned long flags; int count; struct winsize winsize; unsigned char stopped; unsigned char hw_stopped; unsigned char flow_stopped; unsigned char packet; unsigned char ctrl_status; unsigned int receive_room; int flow_change; struct tty_struct *link; struct fasync_struct *fasync; int alt_speed; wait_queue_head_t write_wait; wait_queue_head_t read_wait; struct work_struct hangup_work; void *disc_data; void *driver_data; struct list_head tty_files; unsigned char closing; unsigned char *write_buf; int write_cnt; struct work_struct SAK_work; struct tty_port *port; } ; 162 struct if_irda_qos { unsigned long baudrate; unsigned short data_size; unsigned short window_size; unsigned short min_turn_time; unsigned short max_turn_time; unsigned char add_bofs; unsigned char link_disc; } ; 188 struct if_irda_line { __u8 dtr; __u8 rts; } ; 194 union __anonunion_ifr_ifrn_259 { char ifrn_name[16U]; } ; 194 union __anonunion_ifr_ifru_260 { struct if_irda_line ifru_line; struct if_irda_qos ifru_qos; unsigned short ifru_flags; unsigned int ifru_receiving; unsigned int ifru_mode; unsigned int ifru_dongle; } ; 194 struct if_irda_req { union __anonunion_ifr_ifrn_259 ifr_ifrn; union __anonunion_ifr_ifru_260 ifr_ifru; } ; 225 struct tc_stats { __u64 bytes; __u32 packets; __u32 drops; __u32 overlimits; __u32 bps; __u32 pps; __u32 qlen; __u32 backlog; } ; 92 struct tc_sizespec { unsigned char cell_log; unsigned char size_log; short cell_align; int overhead; unsigned int linklayer; unsigned int mpu; unsigned int mtu; unsigned int tsize; } ; 26 struct gnet_stats_basic_packed { __u64 bytes; __u32 packets; } ; 40 struct gnet_stats_rate_est64 { __u64 bps; __u64 pps; } ; 50 struct gnet_stats_queue { __u32 qlen; __u32 backlog; __u32 drops; __u32 requeues; __u32 overlimits; } ; 76 struct gnet_dump { spinlock_t *lock; struct sk_buff *skb; struct nlattr *tail; int compat_tc_stats; int compat_xstats; void *xstats; int xstats_len; struct tc_stats tc_stats; } ; 68 struct nla_policy { u16 type; u16 len; } ; 25 struct rtnl_link_ops { struct list_head list; const char *kind; size_t priv_size; void (*setup)(struct net_device *); int maxtype; const struct nla_policy *policy; int (*validate)(struct nlattr **, struct nlattr **); int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **); int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **); void (*dellink)(struct net_device *, struct list_head *); size_t (*get_size)(const struct net_device *); int (*fill_info)(struct sk_buff *, const struct net_device *); size_t (*get_xstats_size)(const struct net_device *); int 
(*fill_xstats)(struct sk_buff *, const struct net_device *); unsigned int (*get_num_tx_queues)(); unsigned int (*get_num_rx_queues)(); int slave_maxtype; const struct nla_policy *slave_policy; int (*slave_validate)(struct nlattr **, struct nlattr **); int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **); size_t (*get_slave_size)(const struct net_device *, const struct net_device *); int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); } ; 144 struct Qdisc_ops ; 145 struct qdisc_walker ; 146 struct tcf_walker ; 33 struct qdisc_size_table { struct callback_head rcu; struct list_head list; struct tc_sizespec szopts; int refcnt; u16 data[]; } ; 44 struct Qdisc { int (*enqueue)(struct sk_buff *, struct Qdisc *); struct sk_buff * (*dequeue)(struct Qdisc *); unsigned int flags; u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table *stab; struct list_head list; u32 handle; u32 parent; int (*reshape_fail)(struct sk_buff *, struct Qdisc *); void *u32_node; struct Qdisc *__parent; struct netdev_queue *dev_queue; struct gnet_stats_rate_est64 rate_est; struct Qdisc *next_sched; struct sk_buff *gso_skb; unsigned long state; struct sk_buff_head q; struct gnet_stats_basic_packed bstats; unsigned int __state; struct gnet_stats_queue qstats; struct callback_head callback_head; int padded; atomic_t refcnt; spinlock_t busylock; } ; 128 struct tcf_proto ; 128 struct Qdisc_class_ops { struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); int (*graft)(struct Qdisc *, unsigned long, struct Qdisc *, struct Qdisc **); struct Qdisc * (*leaf)(struct Qdisc *, unsigned long); void (*qlen_notify)(struct Qdisc *, unsigned long); unsigned long int (*get)(struct Qdisc *, u32 ); void (*put)(struct Qdisc *, unsigned long); int (*change)(struct Qdisc *, u32 , u32 , struct nlattr **, unsigned long *); int (*delete)(struct Qdisc *, unsigned long); void (*walk)(struct Qdisc *, struct qdisc_walker *); struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); unsigned long int (*bind_tcf)(struct Qdisc *, unsigned long, u32 ); void (*unbind_tcf)(struct Qdisc *, unsigned long); int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *, struct tcmsg *); int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); } ; 156 struct Qdisc_ops { struct Qdisc_ops *next; const struct Qdisc_class_ops *cl_ops; char id[16U]; int priv_size; int (*enqueue)(struct sk_buff *, struct Qdisc *); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); unsigned int (*drop)(struct Qdisc *); int (*init)(struct Qdisc *, struct nlattr *); void (*reset)(struct Qdisc *); void (*destroy)(struct Qdisc *); int (*change)(struct Qdisc *, struct nlattr *); void (*attach)(struct Qdisc *); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); struct module *owner; } ; 180 struct tcf_result { unsigned long class; u32 classid; } ; 186 struct tcf_proto_ops { struct list_head head; char kind[16U]; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); int (*init)(struct tcf_proto *); void (*destroy)(struct tcf_proto *); unsigned long int (*get)(struct tcf_proto *, u32 ); void (*put)(struct tcf_proto *, unsigned long); int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, unsigned long, u32 , struct nlattr **, unsigned long *, bool ); int (*delete)(struct tcf_proto *, unsigned long); void (*walk)(struct tcf_proto *, 
struct tcf_walker *); int (*dump)(struct net *, struct tcf_proto *, unsigned long, struct sk_buff *, struct tcmsg *); struct module *owner; } ; 212 struct tcf_proto { struct tcf_proto *next; void *root; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); __be16 protocol; u32 prio; u32 classid; struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; } ; 722 struct qdisc_walker { int stop; int skip; int count; int (*fn)(struct Qdisc *, unsigned long, struct qdisc_walker *); } ; 34 typedef __u32 magic_t; 64 struct __anonstruct_qos_value_t_267 { __u32 value; __u16 bits; } ; 64 typedef struct __anonstruct_qos_value_t_267 qos_value_t; 65 struct qos_info { magic_t magic; qos_value_t baud_rate; qos_value_t max_turn_time; qos_value_t data_size; qos_value_t window_size; qos_value_t additional_bofs; qos_value_t min_turn_time; qos_value_t link_disc_time; qos_value_t power; } ; 93 struct irlap_cb ; 133 struct irda_skb_cb { unsigned int default_qdisc_pad; magic_t magic; __u32 next_speed; __u16 mtt; __u16 xbofs; __u16 next_xbofs; void *context; void (*destructor)(struct sk_buff *); __u16 xbofs_delay; __u8 line; } ; 170 struct __anonstruct_chipio_t_269 { int cfg_base; int sir_base; int fir_base; int mem_base; int sir_ext; int fir_ext; int irq; int irq2; int dma; int dma2; int fifo_size; int irqflags; int direction; int enabled; int suspended; __u32 speed; __u32 new_speed; int dongle_id; } ; 170 typedef struct __anonstruct_chipio_t_269 chipio_t; 185 struct __anonstruct_iobuff_t_270 { int state; int in_frame; __u8 *head; __u8 *data; int len; int truesize; __u16 fcs; struct sk_buff *skb; } ; 185 typedef struct __anonstruct_iobuff_t_270 iobuff_t; 56 struct ali_chip { char *name; int cfg[2U]; unsigned char entr1; unsigned char entr2; unsigned char cid_index; unsigned char cid_value; int (*probe)(struct ali_chip *, chipio_t *); int (*init)(struct ali_chip *, chipio_t *); } ; 144 typedef struct ali_chip ali_chip_t; 145 struct st_fifo_entry { int status; int len; } ; 165 struct st_fifo { struct st_fifo_entry entries[7U]; int pending_bytes; int head; int tail; int len; } ; 173 struct frame_cb { void *start; int len; } ; 178 struct tx_fifo { struct frame_cb queue[7U]; int ptr; int len; int free; void *tail; } ; 186 struct ali_ircc_cb { struct st_fifo st_fifo; struct tx_fifo tx_fifo; struct net_device *netdev; struct irlap_cb *irlap; struct qos_info qos; chipio_t io; iobuff_t tx_buff; iobuff_t rx_buff; dma_addr_t tx_buff_dma; dma_addr_t rx_buff_dma; __u8 ier; __u8 InterruptID; __u8 BusStatus; __u8 LineStatus; unsigned char rcvFramesOverflow; struct timeval stamp; struct timeval now; spinlock_t lock; __u32 new_speed; int index; unsigned char fifo_opti_buf; } ; 1 void * __builtin_memcpy(void *, const void *, unsigned long); 1 long int __builtin_expect(long exp, long c); 33 extern struct module __this_module; 358 extern struct pv_irq_ops pv_irq_ops; 72 void set_bit(long nr, volatile unsigned long *addr); 110 void clear_bit(long nr, volatile unsigned long *addr); 250 int test_and_clear_bit(long nr, volatile unsigned long *addr); 139 int printk(const char *, ...); 388 int sprintf(char *, const char *, ...); 88 void __bad_percpu_size(); 71 void warn_slowpath_null(const char *, const int); 55 void * memset(void *, int, size_t ); 802 unsigned long int arch_local_save_flags(); 155 int arch_irqs_disabled_flags(unsigned long flags); 8 extern int __preempt_count; 20 int preempt_count(); 93 void __raw_spin_lock_init(raw_spinlock_t *, const char *, struct lock_class_key *); 22 void 
_raw_spin_lock(raw_spinlock_t *); 39 void _raw_spin_unlock(raw_spinlock_t *); 43 void _raw_spin_unlock_irqrestore(raw_spinlock_t *, unsigned long); 290 raw_spinlock_t * spinlock_check(spinlock_t *lock); 301 void spin_lock(spinlock_t *lock); 365 void ldv_spin_lock_59(spinlock_t *lock); 349 void spin_unlock(spinlock_t *lock); 409 void ldv_spin_unlock_60(spinlock_t *lock); 452 void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags); 488 void ldv_spin_unlock_irqrestore_62(spinlock_t *lock, unsigned long flags); 5 void __ldv_spin_lock(spinlock_t *); 8 void ldv___ldv_spin_lock_7(spinlock_t *ldv_func_arg1); 12 void ldv___ldv_spin_lock_15(spinlock_t *ldv_func_arg1); 16 void ldv___ldv_spin_lock_17(spinlock_t *ldv_func_arg1); 20 void ldv___ldv_spin_lock_19(spinlock_t *ldv_func_arg1); 24 void ldv___ldv_spin_lock_22(spinlock_t *ldv_func_arg1); 28 void ldv___ldv_spin_lock_49(spinlock_t *ldv_func_arg1); 32 void ldv___ldv_spin_lock_53(spinlock_t *ldv_func_arg1); 36 void ldv___ldv_spin_lock_61(spinlock_t *ldv_func_arg1); 40 void ldv___ldv_spin_lock_63(spinlock_t *ldv_func_arg1); 44 void ldv___ldv_spin_lock_66(spinlock_t *ldv_func_arg1); 48 void ldv___ldv_spin_lock_69(spinlock_t *ldv_func_arg1); 52 void ldv___ldv_spin_lock_71(spinlock_t *ldv_func_arg1); 76 void ldv_spin_lock_addr_list_lock_of_net_device(); 92 void ldv_spin_lock_dma_spin_lock(); 108 void ldv_spin_lock_lock(); 116 void ldv_spin_lock_lock_of_NOT_ARG_SIGN(); 124 void ldv_spin_lock_lock_of_ali_ircc_cb(); 125 void ldv_spin_unlock_lock_of_ali_ircc_cb(); 140 void ldv_spin_lock_node_size_lock_of_pglist_data(); 156 void ldv_spin_lock_siglock_of_sighand_struct(); 156 void do_gettimeofday(struct timeval *); 138 extern struct resource ioport_resource; 192 struct resource * __request_region(struct resource *, resource_size_t , resource_size_t , const char *, int); 203 void __release_region(struct resource *, resource_size_t , resource_size_t ); 77 extern volatile unsigned long jiffies; 309 void outb(unsigned char value, int port); 309 unsigned char inb(int port); 223 int net_ratelimit(); 837 void * dev_get_drvdata(const struct device *dev); 76 int is_device_dma_capable(struct device *dev); 53 void debug_dma_alloc_coherent(struct device *, size_t , dma_addr_t , void *); 56 void debug_dma_free_coherent(struct device *, size_t , void *, dma_addr_t ); 27 extern struct device x86_dma_fallback_dev; 30 extern struct dma_map_ops *dma_ops; 32 struct dma_map_ops * get_dma_ops(struct device *dev); 103 unsigned long int dma_alloc_coherent_mask(struct device *dev, gfp_t gfp); 115 gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp); 131 void * dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs); 160 void dma_free_attrs(struct device *dev, size_t size, void *vaddr, dma_addr_t bus, struct dma_attrs *attrs); 176 void * dma_zalloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); 211 bool capable(int); 717 void consume_skb(struct sk_buff *); 1565 unsigned char * skb_put(struct sk_buff *, unsigned int); 1666 void skb_reserve(struct sk_buff *skb, int len); 1783 void skb_reset_mac_header(struct sk_buff *skb); 2016 struct sk_buff * __netdev_alloc_skb(struct net_device *, unsigned int, gfp_t ); 2032 struct sk_buff * netdev_alloc_skb(struct net_device *dev, unsigned int length); 2046 struct sk_buff * dev_alloc_skb(unsigned int length); 2595 void skb_copy_from_linear_data(const struct sk_buff *skb, void *to, const unsigned int len); 2609 void 
skb_copy_to_linear_data(struct sk_buff *skb, const void *from, const unsigned int len); 8 void __udelay(unsigned long); 10 void __const_udelay(unsigned long); 123 int request_threaded_irq(unsigned int, irqreturn_t (*)(int, void *), irqreturn_t (*)(int, void *), unsigned long, const char *, void *); 128 int request_irq(unsigned int irq___0, irqreturn_t (*handler)(int, void *), unsigned long flags, const char *name, void *dev); 142 void free_irq(unsigned int, void *); 1621 struct netdev_queue * netdev_get_tx_queue(const struct net_device *dev, unsigned int index); 1687 void * netdev_priv(const struct net_device *dev); 1975 void free_netdev(struct net_device *); 2140 void __netif_schedule(struct Qdisc *); 2156 void netif_tx_start_queue(struct netdev_queue *dev_queue); 2167 void netif_start_queue(struct net_device *dev); 2182 void netif_tx_wake_queue(struct netdev_queue *dev_queue); 2195 void netif_wake_queue(struct net_device *dev); 2210 void netif_tx_stop_queue(struct netdev_queue *dev_queue); 2226 void netif_stop_queue(struct net_device *dev); 2596 int netif_rx(struct sk_buff *); 2998 int register_netdev(struct net_device *); 2999 void unregister_netdev(struct net_device *); 192 int __platform_driver_register(struct platform_driver *, struct module *); 194 void platform_driver_unregister(struct platform_driver *); 202 void * platform_get_drvdata(const struct platform_device *pdev); 174 void disable_dma(unsigned int dmanr); 305 int request_dma(unsigned int, const char *); 306 void free_dma(unsigned int); 402 bool qdisc_all_tx_empty(const struct net_device *dev); 59 extern unsigned int irda_debug; 83 void irda_init_max_qos_capabilies(struct qos_info *); 88 void irda_qos_bits_to_value(struct qos_info *); 214 struct irlap_cb * irlap_open(struct net_device *, struct qos_info *, const char *); 216 void irlap_close(struct irlap_cb *); 219 void irda_device_set_media_busy(struct net_device *, int); 224 int irda_device_txqueue_empty(const struct net_device *dev); 229 struct net_device * alloc_irdadev(int); 231 void irda_setup_dma(int, dma_addr_t , int, int); 239 __u16 irda_get_mtt(const struct sk_buff *skb); 252 __u32 irda_get_next_speed(const struct sk_buff *skb); 54 int async_wrap_skb(struct sk_buff *, __u8 *, int); 55 void async_unwrap_char(struct net_device *, struct net_device_stats *, iobuff_t *, __u8 ); 223 void switch_bank(int iobase, int bank); 180 int ali_ircc_suspend(struct platform_device *dev, pm_message_t state); 181 int ali_ircc_resume(struct platform_device *dev); 183 struct platform_driver ali_ircc_driver = { 0, 0, 0, &ali_ircc_suspend, &ali_ircc_resume, { "ali-ircc", 0, &__this_module, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 0, 0 }; 193 int qos_mtt_bits = 7; 196 unsigned int io[4U] = { 4294967295U, 4294967295U, 4294967295U, 4294967295U }; 197 unsigned int irq[4U] = { 0U, 0U, 0U, 0U }; 198 unsigned int dma[4U] = { 0U, 0U, 0U, 0U }; 200 int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info); 201 int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info); 202 int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info); 207 ali_chip_t chips[4U] = { { (char *)"M1543", { 1008, 880 }, 81U, 35U, 32U, 67U, &ali_ircc_probe_53, &ali_ircc_init_43 }, { (char *)"M1535", { 1008, 880 }, 81U, 35U, 32U, 83U, &ali_ircc_probe_53, &ali_ircc_init_53 }, { (char *)"M1563", { 1008, 880 }, 81U, 35U, 32U, 99U, &ali_ircc_probe_53, &ali_ircc_init_53 }, { (char *)0, { 0, 0 }, 0U, 0U, 0U, 0U, 0, 0 } }; 216 struct ali_ircc_cb *dev_self[4U] = { (struct ali_ircc_cb *)0, (struct ali_ircc_cb *)0, (struct ali_ircc_cb *)0, 
(struct ali_ircc_cb *)0 }; 219 char *dongle_types[4U] = { (char *)"TFDS6000", (char *)"HP HSDL-3600", (char *)"HP HSDL-1100", (char *)"No dongle connected" }; 227 int ali_ircc_open(int i, chipio_t *info); 229 int ali_ircc_close(struct ali_ircc_cb *self); 231 int ali_ircc_setup(chipio_t *info); 232 int ali_ircc_is_receiving(struct ali_ircc_cb *self); 233 int ali_ircc_net_open(struct net_device *dev); 234 int ali_ircc_net_close(struct net_device *dev); 235 int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 236 void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud); 239 netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev); 241 irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self); 242 void ali_ircc_sir_receive(struct ali_ircc_cb *self); 243 void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self); 244 int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len); 245 void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed); 248 netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev); 250 void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud); 251 irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self); 252 int ali_ircc_dma_receive(struct ali_ircc_cb *self); 253 int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self); 254 int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self); 255 void ali_ircc_dma_xmit(struct ali_ircc_cb *self); 258 int ali_ircc_read_dongle_id(int i, chipio_t *info); 259 void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed); 262 void SIR2FIR(int iobase); 263 void FIR2SIR(int iobase); 264 void SetCOMInterrupts(struct ali_ircc_cb *self, unsigned char enable); 272 int ali_ircc_init(); 373 void ali_ircc_cleanup(); 389 const struct net_device_ops ali_ircc_sir_ops = { 0, 0, &ali_ircc_net_open, &ali_ircc_net_close, &ali_ircc_sir_hard_xmit, 0, 0, 0, 0, 0, &ali_ircc_net_ioctl, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 396 const struct net_device_ops ali_ircc_fir_ops = { 0, 0, &ali_ircc_net_open, &ali_ircc_net_close, &ali_ircc_fir_hard_xmit, 0, 0, 0, 0, 0, &ali_ircc_net_ioctl, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 794 irqreturn_t ali_ircc_interrupt(int irq___0, void *dev_id); 2413 void ldv_check_final_state(); 2416 void ldv_check_return_value(int); 2422 void ldv_initialize(); 2425 void ldv_handler_precall(); 2428 int nondet_int(); 2431 int LDV_IN_INTERRUPT = 0; 2434 void ldv_main0_sequence_infinite_withcheck_stateful(); 10 void ldv_error(); 25 int ldv_undef_int(); 59 void __builtin_trap(); 8 int ldv_spin_NOT_ARG_SIGN = 0; 11 void ldv_spin_lock_NOT_ARG_SIGN(); 20 void ldv_spin_unlock_NOT_ARG_SIGN(); 29 int ldv_spin_trylock_NOT_ARG_SIGN(); 55 void ldv_spin_unlock_wait_NOT_ARG_SIGN(); 62 int ldv_spin_is_locked_NOT_ARG_SIGN(); 83 int ldv_spin_can_lock_NOT_ARG_SIGN(); 90 int ldv_spin_is_contended_NOT_ARG_SIGN(); 111 int ldv_atomic_dec_and_lock_NOT_ARG_SIGN(); 133 int ldv_spin__xmit_lock_of_netdev_queue = 0; 136 void ldv_spin_lock__xmit_lock_of_netdev_queue(); 145 void ldv_spin_unlock__xmit_lock_of_netdev_queue(); 154 int ldv_spin_trylock__xmit_lock_of_netdev_queue(); 180 void ldv_spin_unlock_wait__xmit_lock_of_netdev_queue(); 187 int ldv_spin_is_locked__xmit_lock_of_netdev_queue(); 208 int 
ldv_spin_can_lock__xmit_lock_of_netdev_queue(); 215 int ldv_spin_is_contended__xmit_lock_of_netdev_queue(); 236 int ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue(); 258 int ldv_spin_addr_list_lock_of_net_device = 0; 270 void ldv_spin_unlock_addr_list_lock_of_net_device(); 279 int ldv_spin_trylock_addr_list_lock_of_net_device(); 305 void ldv_spin_unlock_wait_addr_list_lock_of_net_device(); 312 int ldv_spin_is_locked_addr_list_lock_of_net_device(); 333 int ldv_spin_can_lock_addr_list_lock_of_net_device(); 340 int ldv_spin_is_contended_addr_list_lock_of_net_device(); 361 int ldv_atomic_dec_and_lock_addr_list_lock_of_net_device(); 383 int ldv_spin_alloc_lock_of_task_struct = 0; 386 void ldv_spin_lock_alloc_lock_of_task_struct(); 395 void ldv_spin_unlock_alloc_lock_of_task_struct(); 404 int ldv_spin_trylock_alloc_lock_of_task_struct(); 430 void ldv_spin_unlock_wait_alloc_lock_of_task_struct(); 437 int ldv_spin_is_locked_alloc_lock_of_task_struct(); 458 int ldv_spin_can_lock_alloc_lock_of_task_struct(); 465 int ldv_spin_is_contended_alloc_lock_of_task_struct(); 486 int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct(); 508 int ldv_spin_dma_spin_lock = 0; 520 void ldv_spin_unlock_dma_spin_lock(); 529 int ldv_spin_trylock_dma_spin_lock(); 555 void ldv_spin_unlock_wait_dma_spin_lock(); 562 int ldv_spin_is_locked_dma_spin_lock(); 583 int ldv_spin_can_lock_dma_spin_lock(); 590 int ldv_spin_is_contended_dma_spin_lock(); 611 int ldv_atomic_dec_and_lock_dma_spin_lock(); 633 int ldv_spin_i_lock_of_inode = 0; 636 void ldv_spin_lock_i_lock_of_inode(); 645 void ldv_spin_unlock_i_lock_of_inode(); 654 int ldv_spin_trylock_i_lock_of_inode(); 680 void ldv_spin_unlock_wait_i_lock_of_inode(); 687 int ldv_spin_is_locked_i_lock_of_inode(); 708 int ldv_spin_can_lock_i_lock_of_inode(); 715 int ldv_spin_is_contended_i_lock_of_inode(); 736 int ldv_atomic_dec_and_lock_i_lock_of_inode(); 758 int ldv_spin_lock = 0; 770 void ldv_spin_unlock_lock(); 779 int ldv_spin_trylock_lock(); 805 void ldv_spin_unlock_wait_lock(); 812 int ldv_spin_is_locked_lock(); 833 int ldv_spin_can_lock_lock(); 840 int ldv_spin_is_contended_lock(); 861 int ldv_atomic_dec_and_lock_lock(); 883 int ldv_spin_lock_of_NOT_ARG_SIGN = 0; 895 void ldv_spin_unlock_lock_of_NOT_ARG_SIGN(); 904 int ldv_spin_trylock_lock_of_NOT_ARG_SIGN(); 930 void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN(); 937 int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN(); 958 int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN(); 965 int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN(); 986 int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN(); 1008 int ldv_spin_lock_of_ali_ircc_cb = 0; 1029 int ldv_spin_trylock_lock_of_ali_ircc_cb(); 1055 void ldv_spin_unlock_wait_lock_of_ali_ircc_cb(); 1062 int ldv_spin_is_locked_lock_of_ali_ircc_cb(); 1083 int ldv_spin_can_lock_lock_of_ali_ircc_cb(); 1090 int ldv_spin_is_contended_lock_of_ali_ircc_cb(); 1111 int ldv_atomic_dec_and_lock_lock_of_ali_ircc_cb(); 1133 int ldv_spin_lru_lock_of_netns_frags = 0; 1136 void ldv_spin_lock_lru_lock_of_netns_frags(); 1145 void ldv_spin_unlock_lru_lock_of_netns_frags(); 1154 int ldv_spin_trylock_lru_lock_of_netns_frags(); 1180 void ldv_spin_unlock_wait_lru_lock_of_netns_frags(); 1187 int ldv_spin_is_locked_lru_lock_of_netns_frags(); 1208 int ldv_spin_can_lock_lru_lock_of_netns_frags(); 1215 int ldv_spin_is_contended_lru_lock_of_netns_frags(); 1236 int ldv_atomic_dec_and_lock_lru_lock_of_netns_frags(); 1258 int ldv_spin_node_size_lock_of_pglist_data = 0; 1270 void ldv_spin_unlock_node_size_lock_of_pglist_data(); 1279 int 
ldv_spin_trylock_node_size_lock_of_pglist_data(); 1305 void ldv_spin_unlock_wait_node_size_lock_of_pglist_data(); 1312 int ldv_spin_is_locked_node_size_lock_of_pglist_data(); 1333 int ldv_spin_can_lock_node_size_lock_of_pglist_data(); 1340 int ldv_spin_is_contended_node_size_lock_of_pglist_data(); 1361 int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data(); 1383 int ldv_spin_ptl = 0; 1386 void ldv_spin_lock_ptl(); 1395 void ldv_spin_unlock_ptl(); 1404 int ldv_spin_trylock_ptl(); 1430 void ldv_spin_unlock_wait_ptl(); 1437 int ldv_spin_is_locked_ptl(); 1458 int ldv_spin_can_lock_ptl(); 1465 int ldv_spin_is_contended_ptl(); 1486 int ldv_atomic_dec_and_lock_ptl(); 1508 int ldv_spin_siglock_of_sighand_struct = 0; 1520 void ldv_spin_unlock_siglock_of_sighand_struct(); 1529 int ldv_spin_trylock_siglock_of_sighand_struct(); 1555 void ldv_spin_unlock_wait_siglock_of_sighand_struct(); 1562 int ldv_spin_is_locked_siglock_of_sighand_struct(); 1583 int ldv_spin_can_lock_siglock_of_sighand_struct(); 1590 int ldv_spin_is_contended_siglock_of_sighand_struct(); 1611 int ldv_atomic_dec_and_lock_siglock_of_sighand_struct(); 1633 int ldv_spin_tx_global_lock_of_net_device = 0; 1636 void ldv_spin_lock_tx_global_lock_of_net_device(); 1645 void ldv_spin_unlock_tx_global_lock_of_net_device(); 1654 int ldv_spin_trylock_tx_global_lock_of_net_device(); 1680 void ldv_spin_unlock_wait_tx_global_lock_of_net_device(); 1687 int ldv_spin_is_locked_tx_global_lock_of_net_device(); 1708 int ldv_spin_can_lock_tx_global_lock_of_net_device(); 1715 int ldv_spin_is_contended_tx_global_lock_of_net_device(); 1736 int ldv_atomic_dec_and_lock_tx_global_lock_of_net_device(); return ; } { 2436 struct platform_device *var_group1; 2437 pm_message_t var_ali_ircc_suspend_29_p1; 2438 struct net_device *var_group2; 2439 int res_ali_ircc_net_open_19; 2440 int res_ali_ircc_net_close_20; 2441 struct sk_buff *var_group3; 2442 struct ifreq *var_group4; 2443 int var_ali_ircc_net_ioctl_27_p2; 2444 int var_ali_ircc_interrupt_9_p0; 2445 void *var_ali_ircc_interrupt_9_p1; 2446 int ldv_s_ali_ircc_driver_platform_driver; 2447 int ldv_s_ali_ircc_sir_ops_net_device_ops; 2448 int ldv_s_ali_ircc_fir_ops_net_device_ops; 2449 int tmp; 2450 int tmp___0; 2451 int tmp___1; 2566 ldv_s_ali_ircc_driver_platform_driver = 0; 2568 ldv_s_ali_ircc_sir_ops_net_device_ops = 0; 2571 ldv_s_ali_ircc_fir_ops_net_device_ops = 0; 2544 LDV_IN_INTERRUPT = 1; { 1763 ldv_spin_NOT_ARG_SIGN = 1; 1765 ldv_spin__xmit_lock_of_netdev_queue = 1; 1767 ldv_spin_addr_list_lock_of_net_device = 1; 1769 ldv_spin_alloc_lock_of_task_struct = 1; 1771 ldv_spin_dma_spin_lock = 1; 1773 ldv_spin_i_lock_of_inode = 1; 1775 ldv_spin_lock = 1; 1777 ldv_spin_lock_of_NOT_ARG_SIGN = 1; 1779 ldv_spin_lock_of_ali_ircc_cb = 1; 1781 ldv_spin_lru_lock_of_netns_frags = 1; 1783 ldv_spin_node_size_lock_of_pglist_data = 1; 1785 ldv_spin_ptl = 1; 1787 ldv_spin_siglock_of_sighand_struct = 1; 1789 ldv_spin_tx_global_lock_of_net_device = 1; } 2563 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { 274 ali_chip_t *chip; 275 chipio_t info; 276 int ret; 277 int cfg; 278 int cfg_base; 279 int reg; 280 int revision; 281 int i; 282 int tmp; 283 unsigned char tmp___0; 284 unsigned char tmp___1; 285 int tmp___2; 279 i = 0; 281 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_init") { /* Function call is skipped due to function is undefined */} 283 ret = __platform_driver_register(&ali_ircc_driver, &__this_module) { /* Function call is skipped due to function is 
undefined */} 290 ret = -19; 293 chip = (ali_chip_t *)(&chips); 293 goto ldv_45903; 293 unsigned long __CPAchecker_TMP_4 = (unsigned long)(chip->name); 295 goto ldv_45902; 294 ldv_45902:; 295 printk("\017%s(), Probing for %s ...\n", "ali_ircc_init", chip->name) { /* Function call is skipped due to function is undefined */} 298 cfg = 0; 298 goto ldv_45900; 300 goto ldv_45899; 299 ldv_45899:; 300 cfg_base = (chip->cfg)[cfg]; 304 memset((void *)(&info), 0, 72UL) { /* Function call is skipped due to function is undefined */} 305 info.cfg_base = cfg_base; 306 info.fir_base = (int)(io[i]); 307 info.dma = (int)(dma[i]); 308 info.irq = (int)(irq[i]); 312 int __CPAchecker_TMP_0 = (int)(chip->entr1); { 309 Ignored inline assembler code 310 return ;; } 313 int __CPAchecker_TMP_1 = (int)(chip->entr2); { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } 320 int __CPAchecker_TMP_2 = (int)(chip->cid_index); { 309 Ignored inline assembler code 310 return ;; } { 311 unsigned char value; 309 Ignored inline assembler code 309 return value;; } 321 reg = (int)tmp___0; 323 int __CPAchecker_TMP_3 = (int)(chip->cid_value); 352 printk("\017%s(), No %s chip at 0x%03x\n", "ali_ircc_init", chip->name, cfg_base) { /* Function call is skipped due to function is undefined */} { 309 Ignored inline assembler code 310 return ;; } 356 ldv_45898:; 298 cfg = cfg + 1; 299 ldv_45900:; 300 goto ldv_45899; 299 ldv_45899:; 300 cfg_base = (chip->cfg)[cfg]; 304 memset((void *)(&info), 0, 72UL) { /* Function call is skipped due to function is undefined */} 305 info.cfg_base = cfg_base; 306 info.fir_base = (int)(io[i]); 307 info.dma = (int)(dma[i]); 308 info.irq = (int)(irq[i]); 312 int __CPAchecker_TMP_0 = (int)(chip->entr1); { 309 Ignored inline assembler code 310 return ;; } 313 int __CPAchecker_TMP_1 = (int)(chip->entr2); { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } 320 int __CPAchecker_TMP_2 = (int)(chip->cid_index); { 309 Ignored inline assembler code 310 return ;; } { 311 unsigned char value; 309 Ignored inline assembler code 309 return value;; } 321 reg = (int)tmp___0; 323 int __CPAchecker_TMP_3 = (int)(chip->cid_value); 325 printk("\017%s(), Chip found at 0x%03x\n", "ali_ircc_init", cfg_base) { /* Function call is skipped due to function is undefined */} { 309 Ignored inline assembler code 310 return ;; } { 311 unsigned char value; 309 Ignored inline assembler code 309 return value;; } 328 revision = (int)tmp___1; 329 printk("\017%s(), Found %s chip, revision=%d\n", "ali_ircc_init", chip->name, revision) { /* Function call is skipped due to function is undefined */} 343 (*(chip->probe))(chip, &info); { 411 struct net_device *dev; 412 struct ali_ircc_cb *self; 413 int dongle_id; 414 int err; 415 int tmp; 416 int tmp___0; 417 int tmp___1; 418 void *tmp___2; 419 struct lock_class_key __key; 420 int tmp___3; 421 struct resource *tmp___4; 422 void *tmp___5; 423 void *tmp___6; 424 int tmp___7; 425 int tmp___8; 426 int tmp___9; 427 int tmp___10; 428 int tmp___11; 416 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_open") { /* Function call is skipped due to function is undefined */} { 681 unsigned char tmp; 682 int version; 683 int iobase; 684 unsigned char tmp___0; 685 int tmp___1; 686 unsigned char tmp___2; 687 int tmp___3; 683 iobase = info->fir_base; 685 printk("\017%s(), ---------------- Start 
----------------\n", "ali_ircc_setup") { /* Function call is skipped due to function is undefined */} { 2339 printk("\017%s(), ---------------- Start ----------------\n", "SIR2FIR") { /* Function call is skipped due to function is undefined */} { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } 2355 printk("\017%s(), ----------------- End ------------------\n", "SIR2FIR") { /* Function call is skipped due to function is undefined */} } { 309 Ignored inline assembler code 310 return ;; } { { 309 Ignored inline assembler code 310 return ;; } 226 return ;; } { 311 unsigned char value; 309 Ignored inline assembler code 309 return value;; } 700 version = (int)tmp___0; { { 309 Ignored inline assembler code 310 return ;; } 226 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { { 309 Ignored inline assembler code 310 return ;; } 226 return ;; } { 311 unsigned char value; 309 Ignored inline assembler code 309 return value;; } { 309 Ignored inline assembler code 310 return ;; } { { 309 Ignored inline assembler code 310 return ;; } 226 return ;; } { 311 unsigned char value; 309 Ignored inline assembler code 309 return value;; } 727 tmp = ((unsigned int)tmp) & 223U; 728 tmp = ((unsigned int)tmp) | 128U; 729 tmp = ((unsigned int)tmp) & 191U; { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 2360 unsigned char val; 2362 printk("\017%s(), ---------------- Start ----------------\n", "FIR2SIR") { /* Function call is skipped due to function is undefined */} { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 311 unsigned char value; 309 Ignored inline assembler code 309 return value;; } { 311 unsigned char value; 309 Ignored inline assembler code 309 return value;; } { 311 unsigned char value; 309 Ignored inline assembler code 309 return value;; } 2378 printk("\017%s(), ----------------- End ------------------\n", "FIR2SIR") { /* Function call is skipped due to function is undefined */} } 739 tmp___3 = net_ratelimit() { /* Function call is skipped due to function is undefined */} 746 printk("\017%s(), ----------------- End ------------------\n", "ali_ircc_setup") { /* Function call is skipped due to function is undefined */} } 428 dev = alloc_irdadev(608) { /* Function call is skipped due to function is undefined */} 435 self = (struct ali_ircc_cb *)tmp___2; 436 self->netdev = dev; 437 __raw_spin_lock_init(&(self->lock.ldv_6306.rlock), "&(&self->lock)->rlock", &__key) { /* Function call is skipped due to function is undefined */} 440 dev_self[i] = self; 441 self->index = i; 444 self->io.cfg_base = info->cfg_base; 445 self->io.fir_base = info->fir_base; 446 self->io.sir_base = info->sir_base; 447 self->io.irq = info->irq; 448 self->io.fir_ext = 8; 449 self->io.dma = info->dma; 450 self->io.fifo_size = 16; 453 tmp___4 = __request_region(&ioport_resource, (resource_size_t )(self->io.fir_base), (resource_size_t )(self->io.fir_ext), "ali-ircc", 0) { /* Function call is skipped due to function is undefined */} 462 irda_init_max_qos_capabilies(&(self->qos)) { /* Function call is 
skipped due to function is undefined */} 465 self->qos.baud_rate.bits = 510U; 468 self->qos.min_turn_time.bits = (__u16 )qos_mtt_bits; 470 irda_qos_bits_to_value(&(self->qos)) { /* Function call is skipped due to function is undefined */} 473 self->rx_buff.truesize = 14384; 474 self->tx_buff.truesize = 14384; 477 -dma_zalloc_coherent((struct device *)0, (size_t )(self->rx_buff.truesize), &(self->rx_buff_dma), 208U) { 179 void *ret; 180 void *tmp; { 134 struct dma_map_ops *ops; 135 struct dma_map_ops *tmp; 136 void *memory; 137 int tmp___0; 138 gfp_t tmp___1; { 34 long tmp; 37 assume(tmp != 0L); 38 return dma_ops;; } 134 ops = tmp; 137 gfp = gfp & 4294967288U; 142 assume(((unsigned long)dev) == 0UL); 143 dev = &x86_dma_fallback_dev; { 78 int __CPAchecker_TMP_0; 78 unsigned long __CPAchecker_TMP_1 = (unsigned long)(dev->dma_mask); 78 __CPAchecker_TMP_0 = 1; } 145 assume(!(tmp___0 == 0)); 148 unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->alloc); 148 assume(!(__CPAchecker_TMP_0 == 0UL)); { 117 unsigned long dma_mask; 118 unsigned long tmp; { 105 unsigned long dma_mask; 106 dma_mask = 0UL; 108 unsigned long __CPAchecker_TMP_0 = (unsigned long)(dev->coherent_dma_mask); 108 dma_mask = __CPAchecker_TMP_0; 109 assume(dma_mask == 0UL); 110 unsigned long int __CPAchecker_TMP_1; 110 assume(!((((int)gfp) & 1) == 0)); 110 __CPAchecker_TMP_1 = 16777215UL; 110 dma_mask = __CPAchecker_TMP_1; 112 return dma_mask;; } 117 dma_mask = tmp; 119 assume(((unsigned long long)dma_mask) <= 16777215ULL); 120 gfp = gfp | 1U; 122 assume(((unsigned long long)dma_mask) <= 4294967295ULL); 122 assume((gfp & 1U) == 0U); 123 gfp = gfp | 4U; 125 return gfp;; } 151 memory = (*(ops->alloc))(dev, size, dma_handle, tmp___1, attrs); 153 debug_dma_alloc_coherent(dev, size, *dma_handle, memory) { /* Function call is skipped due to function is undefined */} 155 return memory;; } 179 ret = tmp; } 477 self->rx_buff.head = (__u8 *)tmp___5; 485 -dma_zalloc_coherent((struct device *)0, (size_t )(self->tx_buff.truesize), &(self->tx_buff_dma), 208U) { 179 void *ret; 180 void *tmp; { 134 struct dma_map_ops *ops; 135 struct dma_map_ops *tmp; 136 void *memory; 137 int tmp___0; 138 gfp_t tmp___1; { 34 long tmp; 37 assume(tmp != 0L); 38 return dma_ops;; } 134 ops = tmp; 137 gfp = gfp & 4294967288U; 142 assume(((unsigned long)dev) == 0UL); 143 dev = &x86_dma_fallback_dev; { 78 int __CPAchecker_TMP_0; 78 unsigned long __CPAchecker_TMP_1 = (unsigned long)(dev->dma_mask); 78 __CPAchecker_TMP_0 = 1; } 145 assume(!(tmp___0 == 0)); 148 unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->alloc); 148 assume(!(__CPAchecker_TMP_0 == 0UL)); { 117 unsigned long dma_mask; 118 unsigned long tmp; { 105 unsigned long dma_mask; 106 dma_mask = 0UL; 108 unsigned long __CPAchecker_TMP_0 = (unsigned long)(dev->coherent_dma_mask); 108 dma_mask = __CPAchecker_TMP_0; 109 assume(dma_mask == 0UL); 110 unsigned long int __CPAchecker_TMP_1; 110 assume(!((((int)gfp) & 1) == 0)); 110 __CPAchecker_TMP_1 = 16777215UL; 110 dma_mask = __CPAchecker_TMP_1; 112 return dma_mask;; } 117 dma_mask = tmp; 119 assume(((unsigned long long)dma_mask) <= 16777215ULL); 120 gfp = gfp | 1U; 122 assume(((unsigned long long)dma_mask) <= 4294967295ULL); 122 assume((gfp & 1U) == 0U); 123 gfp = gfp | 4U; 125 return gfp;; } 151 memory = (*(ops->alloc))(dev, size, dma_handle, tmp___1, attrs); 153 debug_dma_alloc_coherent(dev, size, *dma_handle, memory) { /* Function call is skipped due to function is undefined */} 155 return memory;; } 179 ret = tmp; } 485 self->tx_buff.head = (__u8 
*)tmp___6; 493 self->rx_buff.in_frame = 0; 494 self->rx_buff.state = 0; 495 self->tx_buff.data = self->tx_buff.head; 496 self->rx_buff.data = self->rx_buff.head; 499 tmp___8 = 0; 499 self->tx_fifo.free = tmp___8; 499 tmp___7 = tmp___8; 499 self->tx_fifo.ptr = tmp___7; 499 self->tx_fifo.len = tmp___7; 500 self->tx_fifo.tail = (void *)(self->tx_buff.head); 503 dev->netdev_ops = &ali_ircc_sir_ops; 505 err = register_netdev(dev) { /* Function call is skipped due to function is undefined */} 510 tmp___10 = net_ratelimit() { /* Function call is skipped due to function is undefined */} { 760 int dongle_id; 761 int reg; 762 int cfg_base; 763 unsigned char tmp; 761 cfg_base = info->cfg_base; 763 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_read_dongle_id") { /* Function call is skipped due to function is undefined */} { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 309 Ignored inline assembler code 310 return ;; } { 311 unsigned char value; 309 Ignored inline assembler code 309 return value;; } 775 reg = (int)tmp; 776 dongle_id = ((reg >> 6) & 2) | ((reg >> 5) & 1); 777 printk("\017%s(), probing dongle_id=%d, dongle_types=%s\n", "ali_ircc_read_dongle_id", dongle_id, dongle_types[dongle_id]) { /* Function call is skipped due to function is undefined */} { 309 Ignored inline assembler code 310 return ;; } 783 printk("\017%s(), ----------------- End ------------------\n", "ali_ircc_read_dongle_id") { /* Function call is skipped due to function is undefined */} } 514 tmp___11 = net_ratelimit() { /* Function call is skipped due to function is undefined */} 517 self->io.dongle_id = dongle_id; 519 printk("\017%s(), ----------------- End -----------------\n", "ali_ircc_open") { /* Function call is skipped due to function is undefined */} } 347 ret = 0; 348 i = i + 1; { 309 Ignored inline assembler code 310 return ;; } 356 ldv_45898:; 298 cfg = cfg + 1; 299 ldv_45900:; 293 chip = chip + 1; 293 i = i + 1; 294 ldv_45903:; 293 unsigned long __CPAchecker_TMP_4 = (unsigned long)(chip->name); 359 printk("\017%s(), ----------------- End -----------------\n", "ali_ircc_init") { /* Function call is skipped due to function is undefined */} } 2577 goto ldv_46302; 2577 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */} 2582 goto ldv_46301; 2578 ldv_46301:; 2583 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 2583 switch (tmp___0) 2810 LDV_IN_INTERRUPT = 2; 2819 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { } 796 struct net_device *dev; 797 struct ali_ircc_cb *self; 798 int ret; 799 void *tmp; 800 irqreturn_t tmp___0; 801 irqreturn_t tmp___1; 796 dev = (struct net_device *)dev_id; 800 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_interrupt") { /* Function call is skipped due to function is undefined */} 802 self = (struct ali_ircc_cb *)tmp; { { } 303 _raw_spin_lock(&(lock->ldv_6306.rlock)) { /* Function call is skipped due to function is undefined */} } { } 947 int iobase; 948 int iir; 949 int lsr; 950 unsigned char tmp; 951 unsigned char tmp___0; 950 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_sir_interrupt") { /* Function call is skipped due to function is undefined */} 952 iobase = self->io.sir_base; { 311 unsigned char value; 309 Ignored inline assembler code 309 
return value;; } 954 iir = ((int)tmp) & 14; { 311 unsigned char value; 309 Ignored inline assembler code 309 return value;; } 957 lsr = (int)tmp___0; 959 printk("\017%s(), iir=%02x, lsr=%02x, iobase=%#x\n", "ali_ircc_sir_interrupt", iir, lsr, iobase) { /* Function call is skipped due to function is undefined */} 962 switch (iir) { } 1035 int actual; 1036 int iobase; 1037 unsigned char tmp; 1035 actual = 0; 1040 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_sir_write_wakeup") { /* Function call is skipped due to function is undefined */} 1042 iobase = self->io.sir_base; 1058 goto ldv_46013; { 311 unsigned char value; 309 Ignored inline assembler code 309 return value;; } 1061 printk("\017%s(), Changing speed! self->new_speed = %d\n", "ali_ircc_sir_write_wakeup", self->new_speed) { /* Function call is skipped due to function is undefined */} { } 1091 struct net_device *dev; 1092 int iobase; 1091 dev = self->netdev; 1094 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_change_speed") { /* Function call is skipped due to function is undefined */} 1096 printk("\017%s(), setting speed = %d\n", "ali_ircc_change_speed", baud) { /* Function call is skipped due to function is undefined */} 1101 iobase = self->io.fir_base; { 2286 unsigned char newMask; 2287 int iobase; 2289 iobase = self->io.fir_base; 2291 printk("\017%s(), -------- Start -------- ( Enable = %d )\n", "SetCOMInterrupts", (int)enable) { /* Function call is skipped due to function is undefined */} 2319 newMask = 0U; { 309 Ignored inline assembler code 310 return ;; } 2332 printk("\017%s(), ----------------- End ------------------\n", "SetCOMInterrupts") { /* Function call is skipped due to function is undefined */} } { } 1177 struct ali_ircc_cb *self; 1178 unsigned long flags; 1179 int iobase; 1180 int fcr; 1181 int lcr; 1182 int divisor; 1177 self = priv; 1184 printk("\017%s(), ---------------- Start ----------------\n", "ali_ircc_sir_change_speed") { /* Function call is skipped due to function is undefined */} 1186 printk("\017%s(), Setting speed to: %d\n", "ali_ircc_sir_change_speed", speed) { /* Function call is skipped due to function is undefined */} 1190 iobase = self->io.sir_base; { 311 unsigned char value; 309 Ignored inline assembler code 309 return value;; } { 311 unsigned char value; 309 Ignored inline assembler code 309 return value;; } 1207 self->io.speed = speed; { } { }} | Source code
1 #ifndef _ASM_X86_DMA_MAPPING_H
2 #define _ASM_X86_DMA_MAPPING_H
3
4 /*
5 * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
6 * Documentation/DMA-API.txt for documentation.
7 */
8
9 #include <linux/kmemcheck.h>
10 #include <linux/scatterlist.h>
11 #include <linux/dma-debug.h>
12 #include <linux/dma-attrs.h>
13 #include <asm/io.h>
14 #include <asm/swiotlb.h>
15 #include <asm-generic/dma-coherent.h>
16 #include <linux/dma-contiguous.h>
17
18 #ifdef CONFIG_ISA
19 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
20 #else
21 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
22 #endif
23
24 #define DMA_ERROR_CODE 0
25
26 extern int iommu_merge;
27 extern struct device x86_dma_fallback_dev;
28 extern int panic_on_overflow;
29
30 extern struct dma_map_ops *dma_ops;
31
32 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
33 {
34 #ifndef CONFIG_X86_DEV_DMA_OPS
35 return dma_ops;
36 #else
37 if (unlikely(!dev) || !dev->archdata.dma_ops)
38 return dma_ops;
39 else
40 return dev->archdata.dma_ops;
41 #endif
42 }
43
44 #include <asm-generic/dma-mapping-common.h>
45
46 /* Make sure we keep the same behaviour */
47 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
48 {
49 struct dma_map_ops *ops = get_dma_ops(dev);
50 debug_dma_mapping_error(dev, dma_addr);
51 if (ops->mapping_error)
52 return ops->mapping_error(dev, dma_addr);
53
54 return (dma_addr == DMA_ERROR_CODE);
55 }
56
57 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
58 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
59
60 extern int dma_supported(struct device *hwdev, u64 mask);
61 extern int dma_set_mask(struct device *dev, u64 mask);
62
63 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
64 dma_addr_t *dma_addr, gfp_t flag,
65 struct dma_attrs *attrs);
66
67 extern void dma_generic_free_coherent(struct device *dev, size_t size,
68 void *vaddr, dma_addr_t dma_addr,
69 struct dma_attrs *attrs);
70
71 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
72 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
73 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
74 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
75 #else
76
77 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
78 {
79 if (!dev->dma_mask)
80 return 0;
81
82 return addr + size - 1 <= *dev->dma_mask;
83 }
84
85 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
86 {
87 return paddr;
88 }
89
90 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
91 {
92 return daddr;
93 }
94 #endif /* CONFIG_X86_DMA_REMAP */
95
96 static inline void
97 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
98 enum dma_data_direction dir)
99 {
100 flush_write_buffers();
101 }
102
103 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
104 gfp_t gfp)
105 {
106 unsigned long dma_mask = 0;
107
108 dma_mask = dev->coherent_dma_mask;
109 if (!dma_mask)
110 dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
111
112 return dma_mask;
113 }
114
115 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
116 {
117 unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
118
119 if (dma_mask <= DMA_BIT_MASK(24))
120 gfp |= GFP_DMA;
121 #ifdef CONFIG_X86_64
122 if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
123 gfp |= GFP_DMA32;
124 #endif
125 return gfp;
126 }
127
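For reference, the constants 16777215UL and 4294967295ULL that appear in the error trace above are simply DMA_BIT_MASK(24) and DMA_BIT_MASK(32). A minimal sketch, assuming the standard definition from linux/dma-mapping.h:

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
/* DMA_BIT_MASK(24) == 0x00ffffffULL == 16777215   -> ISA / GFP_DMA limit   */
/* DMA_BIT_MASK(32) == 0xffffffffULL == 4294967295 -> GFP_DMA32 limit       */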
128 #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
129
130 static inline void *
131 dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
132 gfp_t gfp, struct dma_attrs *attrs)
133 {
134 struct dma_map_ops *ops = get_dma_ops(dev);
135 void *memory;
136
137 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
138
139 if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
140 return memory;
141
142 if (!dev)
143 dev = &x86_dma_fallback_dev;
144
145 if (!is_device_dma_capable(dev))
146 return NULL;
147
148 if (!ops->alloc)
149 return NULL;
150
151 memory = ops->alloc(dev, size, dma_handle,
152 dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
153 debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
154
155 return memory;
156 }
157
158 #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
159
160 static inline void dma_free_attrs(struct device *dev, size_t size,
161 void *vaddr, dma_addr_t bus,
162 struct dma_attrs *attrs)
163 {
164 struct dma_map_ops *ops = get_dma_ops(dev);
165
166 WARN_ON(irqs_disabled()); /* for portability */
167
168 if (dma_release_from_coherent(dev, get_order(size), vaddr))
169 return;
170
171 debug_dma_free_coherent(dev, size, vaddr, bus);
172 if (ops->free)
173 ops->free(dev, size, vaddr, bus, attrs);
174 }
175
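The ali-ircc driver further down calls dma_zalloc_coherent() with a NULL device, which is why the error trace enters dma_alloc_attrs() through this header. A rough sketch of how dma_zalloc_coherent() reduces to dma_alloc_coherent() in kernels of this vintage (the actual wrapper lives in linux/dma-mapping.h and may differ slightly):

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flag)
{
        /* __GFP_ZERO asks the allocator for already-zeroed pages */
        return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}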
176 #endif
1 #ifndef _ASM_X86_IO_H
2 #define _ASM_X86_IO_H
3
4 /*
5 * This file contains the definitions for the x86 IO instructions
6 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
7 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
8 * versions of the single-IO instructions (inb_p/inw_p/..).
9 *
10 * This file is not meant to be obfuscating: it's just complicated
11 * to (a) handle it all in a way that makes gcc able to optimize it
12 * as well as possible and (b) trying to avoid writing the same thing
13 * over and over again with slight variations and possibly making a
14 * mistake somewhere.
15 */
16
17 /*
18 * Thanks to James van Artsdalen for a better timing-fix than
19 * the two short jumps: using outb's to a nonexistent port seems
20 * to guarantee better timings even on fast machines.
21 *
22 * On the other hand, I'd like to be sure of a non-existent port:
23 * I feel a bit unsafe about using 0x80 (should be safe, though)
24 *
25 * Linus
26 */
27
28 /*
29 * Bit simplified and optimized by Jan Hubicka
30 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
31 *
32 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
33 * isa_read[wl] and isa_write[wl] fixed
34 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
35 */
36
37 #define ARCH_HAS_IOREMAP_WC
38
39 #include <linux/string.h>
40 #include <linux/compiler.h>
41 #include <asm/page.h>
42 #include <asm/early_ioremap.h>
43
44 #define build_mmio_read(name, size, type, reg, barrier) \
45 static inline type name(const volatile void __iomem *addr) \
46 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \
47 :"m" (*(volatile type __force *)addr) barrier); return ret; }
48
49 #define build_mmio_write(name, size, type, reg, barrier) \
50 static inline void name(type val, volatile void __iomem *addr) \
51 { asm volatile("mov" size " %0,%1": :reg (val), \
52 "m" (*(volatile type __force *)addr) barrier); }
53
54 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
55 build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
56 build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
57
58 build_mmio_read(__readb, "b", unsigned char, "=q", )
59 build_mmio_read(__readw, "w", unsigned short, "=r", )
60 build_mmio_read(__readl, "l", unsigned int, "=r", )
61
62 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
63 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
64 build_mmio_write(writel, "l", unsigned int, "r", :"memory")
65
66 build_mmio_write(__writeb, "b", unsigned char, "q", )
67 build_mmio_write(__writew, "w", unsigned short, "r", )
68 build_mmio_write(__writel, "l", unsigned int, "r", )
69
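For readability, this is roughly what the readb() instance of build_mmio_read() above expands to (a hand-expanded sketch, not part of the header itself):

static inline unsigned char readb(const volatile void __iomem *addr)
{
        unsigned char ret;
        asm volatile("movb %1,%0"
                     : "=q" (ret)
                     : "m" (*(volatile unsigned char __force *)addr)
                     : "memory");   /* the trailing :"memory" clobber from the macro */
        return ret;
}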
70 #define readb_relaxed(a) __readb(a)
71 #define readw_relaxed(a) __readw(a)
72 #define readl_relaxed(a) __readl(a)
73 #define __raw_readb __readb
74 #define __raw_readw __readw
75 #define __raw_readl __readl
76
77 #define __raw_writeb __writeb
78 #define __raw_writew __writew
79 #define __raw_writel __writel
80
81 #define mmiowb() barrier()
82
83 #ifdef CONFIG_X86_64
84
85 build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
86 build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
87
88 #define readq_relaxed(a) readq(a)
89
90 #define __raw_readq(a) readq(a)
91 #define __raw_writeq(val, addr) writeq(val, addr)
92
93 /* Let people know that we have them */
94 #define readq readq
95 #define writeq writeq
96
97 #endif
98
99 /**
100 * virt_to_phys - map virtual addresses to physical
101 * @address: address to remap
102 *
103 * The returned physical address is the physical (CPU) mapping for
104 * the memory address given. It is only valid to use this function on
105 * addresses directly mapped or allocated via kmalloc.
106 *
107 * This function does not give bus mappings for DMA transfers. In
108 * almost all conceivable cases a device driver should not be using
109 * this function
110 */
111
112 static inline phys_addr_t virt_to_phys(volatile void *address)
113 {
114 return __pa(address);
115 }
116
117 /**
118 * phys_to_virt - map physical address to virtual
119 * @address: address to remap
120 *
121 * The returned virtual address is a current CPU mapping for
122 * the memory address given. It is only valid to use this function on
123 * addresses that have a kernel mapping
124 *
125 * This function does not handle bus mappings for DMA transfers. In
126 * almost all conceivable cases a device driver should not be using
127 * this function
128 */
129
130 static inline void *phys_to_virt(phys_addr_t address)
131 {
132 return __va(address);
133 }
134
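A small usage sketch of these two helpers, assuming a kmalloc()'d lowmem buffer; as the comments above stress, they must not be used to derive DMA bus addresses:

void *buf = kmalloc(64, GFP_KERNEL);      /* directly mapped kernel memory  */
phys_addr_t phys = virt_to_phys(buf);     /* CPU physical address           */
void *same = phys_to_virt(phys);          /* maps back to the same buffer   */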
135 /*
136 * Change "struct page" to physical address.
137 */
138 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
139
140 /*
141 * ISA I/O bus memory addresses are 1:1 with the physical address.
142 * However, we truncate the address to unsigned int to avoid undesirable
143 * promotions in legacy drivers.
144 */
145 static inline unsigned int isa_virt_to_bus(volatile void *address)
146 {
147 return (unsigned int)virt_to_phys(address);
148 }
149 #define isa_page_to_bus(page) ((unsigned int)page_to_phys(page))
150 #define isa_bus_to_virt phys_to_virt
151
152 /*
153 * However PCI ones are not necessarily 1:1 and therefore these interfaces
154 * are forbidden in portable PCI drivers.
155 *
156 * Allow them on x86 for legacy drivers, though.
157 */
158 #define virt_to_bus virt_to_phys
159 #define bus_to_virt phys_to_virt
160
161 /**
162 * ioremap - map bus memory into CPU space
163 * @offset: bus address of the memory
164 * @size: size of the resource to map
165 *
166 * ioremap performs a platform specific sequence of operations to
167 * make bus memory CPU accessible via the readb/readw/readl/writeb/
168 * writew/writel functions and the other mmio helpers. The returned
169 * address is not guaranteed to be usable directly as a virtual
170 * address.
171 *
172 * If the area you are trying to map is a PCI BAR you should have a
173 * look at pci_iomap().
174 */
175 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
176 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
177 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
178 unsigned long prot_val);
179
180 /*
181 * The default ioremap() behavior is non-cached:
182 */
183 static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
184 {
185 return ioremap_nocache(offset, size);
186 }
187
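A minimal usage sketch of the ioremap()/iounmap() pair defined here; BAR_PHYS and BAR_LEN are hypothetical example values and are not taken from the driver below, which uses port I/O rather than MMIO:

/* BAR_PHYS and BAR_LEN: hypothetical resource values for illustration only */
static int example_map_regs(void)
{
        void __iomem *regs = ioremap(BAR_PHYS, BAR_LEN);  /* non-cached by default */
        if (!regs)
                return -ENOMEM;
        writel(0x1, regs + 0x10);          /* program a 32-bit register */
        (void)readl(regs + 0x14);          /* read one back             */
        iounmap(regs);
        return 0;
}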
188 extern void iounmap(volatile void __iomem *addr);
189
190 extern void set_iounmap_nonlazy(void);
191
192 #ifdef __KERNEL__
193
194 #include <asm-generic/iomap.h>
195
196 #include <linux/vmalloc.h>
197
198 /*
199 * Convert a virtual cached pointer to an uncached pointer
200 */
201 #define xlate_dev_kmem_ptr(p) p
202
203 static inline void
204 memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
205 {
206 memset((void __force *)addr, val, count);
207 }
208
209 static inline void
210 memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
211 {
212 memcpy(dst, (const void __force *)src, count);
213 }
214
215 static inline void
216 memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
217 {
218 memcpy((void __force *)dst, src, count);
219 }
220
221 /*
222 * ISA space is 'always mapped' on a typical x86 system, no need to
223 * explicitly ioremap() it. The fact that the ISA IO space is mapped
224 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
225 * are physical addresses. The following constant pointer can be
226 * used as the IO-area pointer (it can be iounmapped as well, so the
227 * analogy with PCI is quite large):
228 */
229 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
230
231 /*
232 * Cache management
233 *
234 * This is needed for two cases
235 * 1. Out of order aware processors
236 * 2. Accidentally out of order processors (PPro errata #51)
237 */
238
239 static inline void flush_write_buffers(void)
240 {
241 #if defined(CONFIG_X86_PPRO_FENCE)
242 asm volatile("lock; addl $0,0(%%esp)": : :"memory");
243 #endif
244 }
245
246 #endif /* __KERNEL__ */
247
248 extern void native_io_delay(void);
249
250 extern int io_delay_type;
251 extern void io_delay_init(void);
252
253 #if defined(CONFIG_PARAVIRT)
254 #include <asm/paravirt.h>
255 #else
256
257 static inline void slow_down_io(void)
258 {
259 native_io_delay();
260 #ifdef REALLY_SLOW_IO
261 native_io_delay();
262 native_io_delay();
263 native_io_delay();
264 #endif
265 }
266
267 #endif
268
269 #define BUILDIO(bwl, bw, type) \
270 static inline void out##bwl(unsigned type value, int port) \
271 { \
272 asm volatile("out" #bwl " %" #bw "0, %w1" \
273 : : "a"(value), "Nd"(port)); \
274 } \
275 \
276 static inline unsigned type in##bwl(int port) \
277 { \
278 unsigned type value; \
279 asm volatile("in" #bwl " %w1, %" #bw "0" \
280 : "=a"(value) : "Nd"(port)); \
281 return value; \
282 } \
283 \
284 static inline void out##bwl##_p(unsigned type value, int port) \
285 { \
286 out##bwl(value, port); \
287 slow_down_io(); \
288 } \
289 \
290 static inline unsigned type in##bwl##_p(int port) \
291 { \
292 unsigned type value = in##bwl(port); \
293 slow_down_io(); \
294 return value; \
295 } \
296 \
297 static inline void outs##bwl(int port, const void *addr, unsigned long count) \
298 { \
299 asm volatile("rep; outs" #bwl \
300 : "+S"(addr), "+c"(count) : "d"(port)); \
301 } \
302 \
303 static inline void ins##bwl(int port, void *addr, unsigned long count) \
304 { \
305 asm volatile("rep; ins" #bwl \
306 : "+D"(addr), "+c"(count) : "d"(port)); \
307 }
308
309 BUILDIO(b, b, char)
310 BUILDIO(w, w, short)
311 BUILDIO(l, , int)
312
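The ali-ircc probe code further down uses exactly these out*/in* helpers to talk to the ALi Super I/O configuration space. A condensed, hypothetical helper illustrating that index/data access pattern (cfg_base is 0x3f0 or 0x370 in the chips[] table below):

/* Illustrative only; the driver open-codes this sequence in its probe routines */
static unsigned char superio_read(int cfg_base, unsigned char index)
{
        outb(index, cfg_base);          /* select the configuration register  */
        return inb(cfg_base + 1);       /* read its value from the data port  */
}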
313 extern void *xlate_dev_mem_ptr(unsigned long phys);
314 extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
315
316 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
317 unsigned long prot_val);
318 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
319
320 extern bool is_early_ioremap_ptep(pte_t *ptep);
321
322 #ifdef CONFIG_XEN
323 #include <xen/xen.h>
324 struct bio_vec;
325
326 extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
327 const struct bio_vec *vec2);
328
329 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
330 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
331 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
332 #endif /* CONFIG_XEN */
333
334 #define IO_SPACE_LIMIT 0xffff
335
336 #ifdef CONFIG_MTRR
337 extern int __must_check arch_phys_wc_add(unsigned long base,
338 unsigned long size);
339 extern void arch_phys_wc_del(int handle);
340 #define arch_phys_wc_add arch_phys_wc_add
341 #endif
342
343 #endif /* _ASM_X86_IO_H */
1
2 #include <linux/kernel.h>
3 #include <linux/spinlock.h>
4
5 extern void __ldv_spin_lock(spinlock_t *lock);
6 extern void __ldv_spin_unlock(spinlock_t *lock);
7 extern int __ldv_spin_trylock(spinlock_t *lock);
8 extern void __ldv_spin_unlock_wait(spinlock_t *lock);
9 extern void __ldv_spin_can_lock(spinlock_t *lock);
10 extern int __ldv_atomic_dec_and_lock(spinlock_t *lock);
11
12 extern void ldv_spin_lock_NOT_ARG_SIGN(void);
13 extern void ldv_spin_unlock_NOT_ARG_SIGN(void);
14 extern int ldv_spin_trylock_NOT_ARG_SIGN(void);
15 extern void ldv_spin_unlock_wait_NOT_ARG_SIGN(void);
16 extern int ldv_spin_is_locked_NOT_ARG_SIGN(void);
17 extern int ldv_spin_can_lock_NOT_ARG_SIGN(void);
18 extern int ldv_spin_is_contended_NOT_ARG_SIGN(void);
19 extern int ldv_atomic_dec_and_lock_NOT_ARG_SIGN(void);
20 extern void ldv_spin_lock__xmit_lock_of_netdev_queue(void);
21 extern void ldv_spin_unlock__xmit_lock_of_netdev_queue(void);
22 extern int ldv_spin_trylock__xmit_lock_of_netdev_queue(void);
23 extern void ldv_spin_unlock_wait__xmit_lock_of_netdev_queue(void);
24 extern int ldv_spin_is_locked__xmit_lock_of_netdev_queue(void);
25 extern int ldv_spin_can_lock__xmit_lock_of_netdev_queue(void);
26 extern int ldv_spin_is_contended__xmit_lock_of_netdev_queue(void);
27 extern int ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue(void);
28 extern void ldv_spin_lock_addr_list_lock_of_net_device(void);
29 extern void ldv_spin_unlock_addr_list_lock_of_net_device(void);
30 extern int ldv_spin_trylock_addr_list_lock_of_net_device(void);
31 extern void ldv_spin_unlock_wait_addr_list_lock_of_net_device(void);
32 extern int ldv_spin_is_locked_addr_list_lock_of_net_device(void);
33 extern int ldv_spin_can_lock_addr_list_lock_of_net_device(void);
34 extern int ldv_spin_is_contended_addr_list_lock_of_net_device(void);
35 extern int ldv_atomic_dec_and_lock_addr_list_lock_of_net_device(void);
36 extern void ldv_spin_lock_alloc_lock_of_task_struct(void);
37 extern void ldv_spin_unlock_alloc_lock_of_task_struct(void);
38 extern int ldv_spin_trylock_alloc_lock_of_task_struct(void);
39 extern void ldv_spin_unlock_wait_alloc_lock_of_task_struct(void);
40 extern int ldv_spin_is_locked_alloc_lock_of_task_struct(void);
41 extern int ldv_spin_can_lock_alloc_lock_of_task_struct(void);
42 extern int ldv_spin_is_contended_alloc_lock_of_task_struct(void);
43 extern int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct(void);
44 extern void ldv_spin_lock_dma_spin_lock(void);
45 extern void ldv_spin_unlock_dma_spin_lock(void);
46 extern int ldv_spin_trylock_dma_spin_lock(void);
47 extern void ldv_spin_unlock_wait_dma_spin_lock(void);
48 extern int ldv_spin_is_locked_dma_spin_lock(void);
49 extern int ldv_spin_can_lock_dma_spin_lock(void);
50 extern int ldv_spin_is_contended_dma_spin_lock(void);
51 extern int ldv_atomic_dec_and_lock_dma_spin_lock(void);
52 extern void ldv_spin_lock_i_lock_of_inode(void);
53 extern void ldv_spin_unlock_i_lock_of_inode(void);
54 extern int ldv_spin_trylock_i_lock_of_inode(void);
55 extern void ldv_spin_unlock_wait_i_lock_of_inode(void);
56 extern int ldv_spin_is_locked_i_lock_of_inode(void);
57 extern int ldv_spin_can_lock_i_lock_of_inode(void);
58 extern int ldv_spin_is_contended_i_lock_of_inode(void);
59 extern int ldv_atomic_dec_and_lock_i_lock_of_inode(void);
60 extern void ldv_spin_lock_lock(void);
61 extern void ldv_spin_unlock_lock(void);
62 extern int ldv_spin_trylock_lock(void);
63 extern void ldv_spin_unlock_wait_lock(void);
64 extern int ldv_spin_is_locked_lock(void);
65 extern int ldv_spin_can_lock_lock(void);
66 extern int ldv_spin_is_contended_lock(void);
67 extern int ldv_atomic_dec_and_lock_lock(void);
68 extern void ldv_spin_lock_lock_of_NOT_ARG_SIGN(void);
69 extern void ldv_spin_unlock_lock_of_NOT_ARG_SIGN(void);
70 extern int ldv_spin_trylock_lock_of_NOT_ARG_SIGN(void);
71 extern void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN(void);
72 extern int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN(void);
73 extern int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN(void);
74 extern int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN(void);
75 extern int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN(void);
76 extern void ldv_spin_lock_lock_of_ali_ircc_cb(void);
77 extern void ldv_spin_unlock_lock_of_ali_ircc_cb(void);
78 extern int ldv_spin_trylock_lock_of_ali_ircc_cb(void);
79 extern void ldv_spin_unlock_wait_lock_of_ali_ircc_cb(void);
80 extern int ldv_spin_is_locked_lock_of_ali_ircc_cb(void);
81 extern int ldv_spin_can_lock_lock_of_ali_ircc_cb(void);
82 extern int ldv_spin_is_contended_lock_of_ali_ircc_cb(void);
83 extern int ldv_atomic_dec_and_lock_lock_of_ali_ircc_cb(void);
84 extern void ldv_spin_lock_lru_lock_of_netns_frags(void);
85 extern void ldv_spin_unlock_lru_lock_of_netns_frags(void);
86 extern int ldv_spin_trylock_lru_lock_of_netns_frags(void);
87 extern void ldv_spin_unlock_wait_lru_lock_of_netns_frags(void);
88 extern int ldv_spin_is_locked_lru_lock_of_netns_frags(void);
89 extern int ldv_spin_can_lock_lru_lock_of_netns_frags(void);
90 extern int ldv_spin_is_contended_lru_lock_of_netns_frags(void);
91 extern int ldv_atomic_dec_and_lock_lru_lock_of_netns_frags(void);
92 extern void ldv_spin_lock_node_size_lock_of_pglist_data(void);
93 extern void ldv_spin_unlock_node_size_lock_of_pglist_data(void);
94 extern int ldv_spin_trylock_node_size_lock_of_pglist_data(void);
95 extern void ldv_spin_unlock_wait_node_size_lock_of_pglist_data(void);
96 extern int ldv_spin_is_locked_node_size_lock_of_pglist_data(void);
97 extern int ldv_spin_can_lock_node_size_lock_of_pglist_data(void);
98 extern int ldv_spin_is_contended_node_size_lock_of_pglist_data(void);
99 extern int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data(void);
100 extern void ldv_spin_lock_ptl(void);
101 extern void ldv_spin_unlock_ptl(void);
102 extern int ldv_spin_trylock_ptl(void);
103 extern void ldv_spin_unlock_wait_ptl(void);
104 extern int ldv_spin_is_locked_ptl(void);
105 extern int ldv_spin_can_lock_ptl(void);
106 extern int ldv_spin_is_contended_ptl(void);
107 extern int ldv_atomic_dec_and_lock_ptl(void);
108 extern void ldv_spin_lock_siglock_of_sighand_struct(void);
109 extern void ldv_spin_unlock_siglock_of_sighand_struct(void);
110 extern int ldv_spin_trylock_siglock_of_sighand_struct(void);
111 extern void ldv_spin_unlock_wait_siglock_of_sighand_struct(void);
112 extern int ldv_spin_is_locked_siglock_of_sighand_struct(void);
113 extern int ldv_spin_can_lock_siglock_of_sighand_struct(void);
114 extern int ldv_spin_is_contended_siglock_of_sighand_struct(void);
115 extern int ldv_atomic_dec_and_lock_siglock_of_sighand_struct(void);
116 extern void ldv_spin_lock_tx_global_lock_of_net_device(void);
117 extern void ldv_spin_unlock_tx_global_lock_of_net_device(void);
118 extern int ldv_spin_trylock_tx_global_lock_of_net_device(void);
119 extern void ldv_spin_unlock_wait_tx_global_lock_of_net_device(void);
120 extern int ldv_spin_is_locked_tx_global_lock_of_net_device(void);
121 extern int ldv_spin_can_lock_tx_global_lock_of_net_device(void);
122 extern int ldv_spin_is_contended_tx_global_lock_of_net_device(void);
123 extern int ldv_atomic_dec_and_lock_tx_global_lock_of_net_device(void);
124
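The ldv_spin_* declarations above are stubs generated by the Linux Driver Verification toolset; the verifier links them against a simple state-machine model so it can check lock/unlock pairing. A hypothetical sketch of what such a model might look like for one of the locks (the real generated model may differ):

/* ldv_assert() is assumed to be provided by the verification environment */
extern void ldv_assert(int expression);

/* 1 = unlocked, 2 = locked; only ever evaluated by the verifier */
static int ldv_lock_of_ali_ircc_cb = 1;

void ldv_spin_lock_lock_of_ali_ircc_cb(void)
{
        /* taking a lock that is already held is the error being searched for */
        ldv_assert(ldv_lock_of_ali_ircc_cb == 1);
        ldv_lock_of_ali_ircc_cb = 2;
}

void ldv_spin_unlock_lock_of_ali_ircc_cb(void)
{
        ldv_assert(ldv_lock_of_ali_ircc_cb == 2);
        ldv_lock_of_ali_ircc_cb = 1;
}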
125 /*********************************************************************
126 *
127 * Filename: ali-ircc.h
128 * Version: 0.5
129 * Description: Driver for the ALI M1535D and M1543C FIR Controller
130 * Status: Experimental.
131 * Author: Benjamin Kong <benjamin_kong@ali.com.tw>
132 * Created at: 2000/10/16 03:46PM
133 * Modified at: 2001/1/3 02:55PM
134 * Modified by: Benjamin Kong <benjamin_kong@ali.com.tw>
135 * Modified at: 2003/11/6 and support for ALi south-bridge chipsets M1563
136 * Modified by: Clear Zhang <clear_zhang@ali.com.tw>
137 *
138 * Copyright (c) 2000 Benjamin Kong <benjamin_kong@ali.com.tw>
139 * All Rights Reserved
140 *
141 * This program is free software; you can redistribute it and/or
142 * modify it under the terms of the GNU General Public License as
143 * published by the Free Software Foundation; either version 2 of
144 * the License, or (at your option) any later version.
145 *
146 ********************************************************************/
147
148 #include <linux/module.h>
149 #include <linux/gfp.h>
150
151 #include <linux/kernel.h>
152 #include <linux/types.h>
153 #include <linux/skbuff.h>
154 #include <linux/netdevice.h>
155 #include <linux/ioport.h>
156 #include <linux/delay.h>
157 #include <linux/init.h>
158 #include <linux/interrupt.h>
159 #include <linux/rtnetlink.h>
160 #include <linux/serial_reg.h>
161 #include <linux/dma-mapping.h>
162 #include <linux/platform_device.h>
163
164 #include <asm/io.h>
165 #include <asm/dma.h>
166 #include <asm/byteorder.h>
167
168 #include <net/irda/wrapper.h>
169 #include <net/irda/irda.h>
170 #include <net/irda/irda_device.h>
171
172 #include "ali-ircc.h"
173
174 #define CHIP_IO_EXTENT 8
175 #define BROKEN_DONGLE_ID
176
177 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
178
179 /* Power Management */
180 static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state);
181 static int ali_ircc_resume(struct platform_device *dev);
182
183 static struct platform_driver ali_ircc_driver = {
184 .suspend = ali_ircc_suspend,
185 .resume = ali_ircc_resume,
186 .driver = {
187 .name = ALI_IRCC_DRIVER_NAME,
188 .owner = THIS_MODULE,
189 },
190 };
191
192 /* Module parameters */
193 static int qos_mtt_bits = 0x07; /* 1 ms or more */
194
195 /* Use BIOS settings by default, but the user may supply module parameters */
196 static unsigned int io[] = { ~0, ~0, ~0, ~0 };
197 static unsigned int irq[] = { 0, 0, 0, 0 };
198 static unsigned int dma[] = { 0, 0, 0, 0 };
199
200 static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info);
201 static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info);
202 static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info);
203
204 /* These are the currently known ALi south-bridge chipsets; the only difference
205 * is that M1543C doesn't support HP HDSL-3600
206 */
207 static ali_chip_t chips[] =
208 {
209 { "M1543", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x43, ali_ircc_probe_53, ali_ircc_init_43 },
210 { "M1535", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x53, ali_ircc_probe_53, ali_ircc_init_53 },
211 { "M1563", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x63, ali_ircc_probe_53, ali_ircc_init_53 },
212 { NULL }
213 };
214
215 /* Max 4 instances for now */
216 static struct ali_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
217
218 /* Dongle Types */
219 static char *dongle_types[] = {
220 "TFDS6000",
221 "HP HSDL-3600",
222 "HP HSDL-1100",
223 "No dongle connected",
224 };
225
226 /* Some prototypes */
227 static int ali_ircc_open(int i, chipio_t *info);
228
229 static int ali_ircc_close(struct ali_ircc_cb *self);
230
231 static int ali_ircc_setup(chipio_t *info);
232 static int ali_ircc_is_receiving(struct ali_ircc_cb *self);
233 static int ali_ircc_net_open(struct net_device *dev);
234 static int ali_ircc_net_close(struct net_device *dev);
235 static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
236 static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud);
237
238 /* SIR function */
239 static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
240 struct net_device *dev);
241 static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self);
242 static void ali_ircc_sir_receive(struct ali_ircc_cb *self);
243 static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self);
244 static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len);
245 static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed);
246
247 /* FIR function */
248 static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
249 struct net_device *dev);
250 static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 speed);
251 static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self);
252 static int ali_ircc_dma_receive(struct ali_ircc_cb *self);
253 static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self);
254 static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self);
255 static void ali_ircc_dma_xmit(struct ali_ircc_cb *self);
256
257 /* My Function */
258 static int ali_ircc_read_dongle_id (int i, chipio_t *info);
259 static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed);
260
261 /* ALi chip function */
262 static void SIR2FIR(int iobase);
263 static void FIR2SIR(int iobase);
264 static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable);
265
266 /*
267 * Function ali_ircc_init ()
268 *
269 * Initialize the chip. Find out what kinds of chips we are dealing with
270 * and their configuration registers address
271 */
272 static int __init ali_ircc_init(void)
273 {
274 ali_chip_t *chip;
275 chipio_t info;
276 int ret;
277 int cfg, cfg_base;
278 int reg, revision;
279 int i = 0;
280
281 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
282
283 ret = platform_driver_register(&ali_ircc_driver);
284 if (ret) {
285 IRDA_ERROR("%s, Can't register driver!\n",
286 ALI_IRCC_DRIVER_NAME);
287 return ret;
288 }
289
290 ret = -ENODEV;
291
292 /* Probe for all the ALi chipsets we know about */
293 for (chip= chips; chip->name; chip++, i++)
294 {
295 IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__, chip->name);
296
297 /* Try all config registers for this chip */
298 for (cfg=0; cfg<2; cfg++)
299 {
300 cfg_base = chip->cfg[cfg];
301 if (!cfg_base)
302 continue;
303
304 memset(&info, 0, sizeof(chipio_t));
305 info.cfg_base = cfg_base;
306 info.fir_base = io[i];
307 info.dma = dma[i];
308 info.irq = irq[i];
309
310
311 /* Enter Configuration */
312 outb(chip->entr1, cfg_base);
313 outb(chip->entr2, cfg_base);
314
315 /* Select Logical Device 5 Registers (UART2) */
316 outb(0x07, cfg_base);
317 outb(0x05, cfg_base+1);
318
319 /* Read Chip Identification Register */
320 outb(chip->cid_index, cfg_base);
321 reg = inb(cfg_base+1);
322
323 if (reg == chip->cid_value)
324 {
325 IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __func__, cfg_base);
326
327 outb(0x1F, cfg_base);
328 revision = inb(cfg_base+1);
329 IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __func__,
330 chip->name, revision);
331
332 /*
333 * If the user supplies the base address, then
334 * we initialize the chip; if not, we probe the values
335 * set by the BIOS
336 */
337 if (io[i] < 2000)
338 {
339 chip->init(chip, &info);
340 }
341 else
342 {
343 chip->probe(chip, &info);
344 }
345
346 if (ali_ircc_open(i, &info) == 0)
347 ret = 0;
348 i++;
349 }
350 else
351 {
352 IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __func__, chip->name, cfg_base);
353 }
354 /* Exit configuration */
355 outb(0xbb, cfg_base);
356 }
357 }
358
359 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
360
361 if (ret)
362 platform_driver_unregister(&ali_ircc_driver);
363
364 return ret;
365 }
366
367 /*
368 * Function ali_ircc_cleanup ()
369 *
370 * Close all configured chips
371 *
372 */
373 static void __exit ali_ircc_cleanup(void)
374 {
375 int i;
376
377 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
378
379 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
380 if (dev_self[i])
381 ali_ircc_close(dev_self[i]);
382 }
383
384 platform_driver_unregister(&ali_ircc_driver);
385
386 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
387 }
388
389 static const struct net_device_ops ali_ircc_sir_ops = {
390 .ndo_open = ali_ircc_net_open,
391 .ndo_stop = ali_ircc_net_close,
392 .ndo_start_xmit = ali_ircc_sir_hard_xmit,
393 .ndo_do_ioctl = ali_ircc_net_ioctl,
394 };
395
396 static const struct net_device_ops ali_ircc_fir_ops = {
397 .ndo_open = ali_ircc_net_open,
398 .ndo_stop = ali_ircc_net_close,
399 .ndo_start_xmit = ali_ircc_fir_hard_xmit,
400 .ndo_do_ioctl = ali_ircc_net_ioctl,
401 };
402
403 /*
404 * Function ali_ircc_open (int i, chipio_t *inf)
405 *
406 * Open driver instance
407 *
408 */
409 static int ali_ircc_open(int i, chipio_t *info)
410 {
411 struct net_device *dev;
412 struct ali_ircc_cb *self;
413 int dongle_id;
414 int err;
415
416 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
417
418 if (i >= ARRAY_SIZE(dev_self)) {
419 IRDA_ERROR("%s(), maximum number of supported chips reached!\n",
420 __func__);
421 return -ENOMEM;
422 }
423
424 /* Set FIR FIFO and DMA Threshold */
425 if ((ali_ircc_setup(info)) == -1)
426 return -1;
427
428 dev = alloc_irdadev(sizeof(*self));
429 if (dev == NULL) {
430 IRDA_ERROR("%s(), can't allocate memory for control block!\n",
431 __func__);
432 return -ENOMEM;
433 }
434
435 self = netdev_priv(dev);
436 self->netdev = dev;
437 spin_lock_init(&self->lock);
438
439 /* Need to store self somewhere */
440 dev_self[i] = self;
441 self->index = i;
442
443 /* Initialize IO */
444 self->io.cfg_base = info->cfg_base; /* Assigned in ali_ircc_probe_53 */
445 self->io.fir_base = info->fir_base; /* info->sir_base = info->fir_base */
446 self->io.sir_base = info->sir_base; /* ALi SIR and FIR use the same address */
447 self->io.irq = info->irq;
448 self->io.fir_ext = CHIP_IO_EXTENT;
449 self->io.dma = info->dma;
450 self->io.fifo_size = 16; /* SIR: 16, FIR: 32 Benjamin 2000/11/1 */
451
452 /* Reserve the ioports that we need */
453 if (!request_region(self->io.fir_base, self->io.fir_ext,
454 ALI_IRCC_DRIVER_NAME)) {
455 IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __func__,
456 self->io.fir_base);
457 err = -ENODEV;
458 goto err_out1;
459 }
460
461 /* Initialize QoS for this device */
462 irda_init_max_qos_capabilies(&self->qos);
463
464 /* The only value we must override is the baud rate */
465 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
466 IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8); // benjamin 2000/11/8 05:27PM
467
468 self->qos.min_turn_time.bits = qos_mtt_bits;
469
470 irda_qos_bits_to_value(&self->qos);
471
472 /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
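/* e.g. with the IrLAP maxima data_size = 2048 and window_size = 7:
 *      (2048 + 6) * 7 + 6 = 14384, which is the truesize used below */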
473 self->rx_buff.truesize = 14384;
474 self->tx_buff.truesize = 14384;
475
476 /* Allocate memory if needed */
477 self->rx_buff.head =
478 dma_zalloc_coherent(NULL, self->rx_buff.truesize,
479 &self->rx_buff_dma, GFP_KERNEL);
480 if (self->rx_buff.head == NULL) {
481 err = -ENOMEM;
482 goto err_out2;
483 }
484
485 self->tx_buff.head =
486 dma_zalloc_coherent(NULL, self->tx_buff.truesize,
487 &self->tx_buff_dma, GFP_KERNEL);
488 if (self->tx_buff.head == NULL) {
489 err = -ENOMEM;
490 goto err_out3;
491 }
492
493 self->rx_buff.in_frame = FALSE;
494 self->rx_buff.state = OUTSIDE_FRAME;
495 self->tx_buff.data = self->tx_buff.head;
496 self->rx_buff.data = self->rx_buff.head;
497
498 /* Reset Tx queue info */
499 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
500 self->tx_fifo.tail = self->tx_buff.head;
501
502 /* Override the network functions we need to use */
503 dev->netdev_ops = &ali_ircc_sir_ops;
504
505 err = register_netdev(dev);
506 if (err) {
507 IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
508 goto err_out4;
509 }
510 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
511
512 /* Check dongle id */
513 dongle_id = ali_ircc_read_dongle_id(i, info);
514 IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __func__,
515 ALI_IRCC_DRIVER_NAME, dongle_types[dongle_id]);
516
517 self->io.dongle_id = dongle_id;
518
519 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
520
521 return 0;
522
523 err_out4:
524 dma_free_coherent(NULL, self->tx_buff.truesize,
525 self->tx_buff.head, self->tx_buff_dma);
526 err_out3:
527 dma_free_coherent(NULL, self->rx_buff.truesize,
528 self->rx_buff.head, self->rx_buff_dma);
529 err_out2:
530 release_region(self->io.fir_base, self->io.fir_ext);
531 err_out1:
532 dev_self[i] = NULL;
533 free_netdev(dev);
534 return err;
535 }
536
537
538 /*
539 * Function ali_ircc_close (self)
540 *
541 * Close driver instance
542 *
543 */
544 static int __exit ali_ircc_close(struct ali_ircc_cb *self)
545 {
546 int iobase;
547
548 IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__);
549
550 IRDA_ASSERT(self != NULL, return -1;);
551
552 iobase = self->io.fir_base;
553
554 /* Remove netdevice */
555 unregister_netdev(self->netdev);
556
557 /* Release the PORT that this driver is using */
558 IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __func__, self->io.fir_base);
559 release_region(self->io.fir_base, self->io.fir_ext);
560
561 if (self->tx_buff.head)
562 dma_free_coherent(NULL, self->tx_buff.truesize,
563 self->tx_buff.head, self->tx_buff_dma);
564
565 if (self->rx_buff.head)
566 dma_free_coherent(NULL, self->rx_buff.truesize,
567 self->rx_buff.head, self->rx_buff_dma);
568
569 dev_self[self->index] = NULL;
570 free_netdev(self->netdev);
571
572 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
573
574 return 0;
575 }
576
577 /*
578 * Function ali_ircc_init_43 (chip, info)
579 *
580 * Initialize the ALi M1543 chip.
581 */
582 static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info)
583 {
584 /* All controller information like I/O address, DMA channel, IRQ
585 * are set by BIOS
586 */
587
588 return 0;
589 }
590
591 /*
592 * Function ali_ircc_init_53 (chip, info)
593 *
594 * Initialize the ALi M1535 chip.
595 */
596 static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info)
597 {
598 /* All controller information like I/O address, DMA channel, IRQ
599 * are set by BIOS
600 */
601
602 return 0;
603 }
604
605 /*
606 * Function ali_ircc_probe_53 (chip, info)
607 *
608 * Probes for the ALi M1535D or M1535
609 */
610 static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
611 {
612 int cfg_base = info->cfg_base;
613 int hi, low, reg;
614
615 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
616
617 /* Enter Configuration */
618 outb(chip->entr1, cfg_base);
619 outb(chip->entr2, cfg_base);
620
621 /* Select Logical Device 5 Registers (UART2) */
622 outb(0x07, cfg_base);
623 outb(0x05, cfg_base+1);
624
625 /* Read address control register */
626 outb(0x60, cfg_base);
627 hi = inb(cfg_base+1);
628 outb(0x61, cfg_base);
629 low = inb(cfg_base+1);
630 info->fir_base = (hi<<8) + low;
631
632 info->sir_base = info->fir_base;
633
634 IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__, info->fir_base);
635
636 /* Read IRQ control register */
637 outb(0x70, cfg_base);
638 reg = inb(cfg_base+1);
639 info->irq = reg & 0x0f;
640 IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq);
641
642 /* Read DMA channel */
643 outb(0x74, cfg_base);
644 reg = inb(cfg_base+1);
645 info->dma = reg & 0x07;
646
647 if(info->dma == 0x04)
648 IRDA_WARNING("%s(), No DMA channel assigned !\n", __func__);
649 else
650 IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma);
651
652 /* Read Enabled Status */
653 outb(0x30, cfg_base);
654 reg = inb(cfg_base+1);
655 info->enabled = (reg & 0x80) && (reg & 0x01);
656 IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __func__, info->enabled);
657
658 /* Read Power Status */
659 outb(0x22, cfg_base);
660 reg = inb(cfg_base+1);
661 info->suspended = (reg & 0x20);
662 IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __func__, info->suspended);
663
664 /* Exit configuration */
665 outb(0xbb, cfg_base);
666
667 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
668
669 return 0;
670 }
671
672 /*
673 * Function ali_ircc_setup (info)
674 *
675 * Set FIR FIFO and DMA Threshold
676 * Returns non-negative on success.
677 *
678 */
679 static int ali_ircc_setup(chipio_t *info)
680 {
681 unsigned char tmp;
682 int version;
683 int iobase = info->fir_base;
684
685 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
686
687 /* Locking comments :
688 * Most operations here need to be protected. We are called before
689 * the device instance is created in ali_ircc_open(), therefore
690 * nobody can bother us - Jean II */
691
692 /* Switch to FIR space */
693 SIR2FIR(iobase);
694
695 /* Master Reset */
696 outb(0x40, iobase+FIR_MCR); // benjamin 2000/11/30 11:45AM
697
698 /* Read FIR ID Version Register */
699 switch_bank(iobase, BANK3);
700 version = inb(iobase+FIR_ID_VR);
701
702 /* Should be 0x00 in the M1535/M1535D */
703 if(version != 0x00)
704 {
705 IRDA_ERROR("%s, Wrong chip version %02x\n",
706 ALI_IRCC_DRIVER_NAME, version);
707 return -1;
708 }
709
710 /* Set FIR FIFO Threshold Register */
711 switch_bank(iobase, BANK1);
712 outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
713
714 /* Set FIR DMA Threshold Register */
715 outb(RX_DMA_Threshold, iobase+FIR_DMA_TR);
716
717 /* CRC enable */
718 switch_bank(iobase, BANK2);
719 outb(inb(iobase+FIR_IRDA_CR) | IRDA_CR_CRC, iobase+FIR_IRDA_CR);
720
721 /* NDIS driver set TX Length here BANK2 Alias 3, Alias4*/
722
723 /* Switch to Bank 0 */
724 switch_bank(iobase, BANK0);
725
726 tmp = inb(iobase+FIR_LCR_B);
727 tmp &=~0x20; // disable SIP
728 tmp |= 0x80; // these two steps make RX mode
729 tmp &= 0xbf;
730 outb(tmp, iobase+FIR_LCR_B);
731
732 /* Disable Interrupt */
733 outb(0x00, iobase+FIR_IER);
734
735
736 /* Switch to SIR space */
737 FIR2SIR(iobase);
738
739 IRDA_MESSAGE("%s, driver loaded (Benjamin Kong)\n",
740 ALI_IRCC_DRIVER_NAME);
741
742 /* Enable receive interrupts */
743 // outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM
744 // Turn on the interrupts in ali_ircc_net_open
745
746 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
747
748 return 0;
749 }
750
751 /*
752 * Function ali_ircc_read_dongle_id (int index, info)
753 *
754 * Try to read dongle identification. This procedure needs to be executed
755 * once after power-on/reset. It also needs to be used whenever you suspect
756 * that the user may have plugged/unplugged the IrDA Dongle.
757 */
758 static int ali_ircc_read_dongle_id (int i, chipio_t *info)
759 {
760 int dongle_id, reg;
761 int cfg_base = info->cfg_base;
762
763 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
764
765 /* Enter Configuration */
766 outb(chips[i].entr1, cfg_base);
767 outb(chips[i].entr2, cfg_base);
768
769 /* Select Logical Device 5 Registers (UART2) */
770 outb(0x07, cfg_base);
771 outb(0x05, cfg_base+1);
772
773 /* Read Dongle ID */
774 outb(0xf0, cfg_base);
775 reg = inb(cfg_base+1);
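/* bit 7 of the register becomes bit 1 of dongle_id, bit 5 becomes bit 0 */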
776 dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01);
777 IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __func__,
778 dongle_id, dongle_types[dongle_id]);
779
780 /* Exit configuration */
781 outb(0xbb, cfg_base);
782
783 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
784
785 return dongle_id;
786 }
787
788 /*
789 * Function ali_ircc_interrupt (irq, dev_id, regs)
790 *
791 * An interrupt from the chip has arrived. Time to do some work
792 *
793 */
794 static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
795 {
796 struct net_device *dev = dev_id;
797 struct ali_ircc_cb *self;
798 int ret;
799
800 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
801
802 self = netdev_priv(dev);
803
804 spin_lock(&self->lock);
805
806 /* Dispatch interrupt handler for the current speed */
807 if (self->io.speed > 115200)
808 ret = ali_ircc_fir_interrupt(self);
809 else
810 ret = ali_ircc_sir_interrupt(self);
811
812 spin_unlock(&self->lock);
813
814 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
815 return ret;
816 }
817 /*
818 * Function ali_ircc_fir_interrupt(irq, struct ali_ircc_cb *self)
819 *
820 * Handle MIR/FIR interrupt
821 *
822 */
823 static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
824 {
825 __u8 eir, OldMessageCount;
826 int iobase, tmp;
827
828 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__);
829
830 iobase = self->io.fir_base;
831
832 switch_bank(iobase, BANK0);
833 self->InterruptID = inb(iobase+FIR_IIR);
834 self->BusStatus = inb(iobase+FIR_BSR);
835
836 OldMessageCount = (self->LineStatus + 1) & 0x07;
837 self->LineStatus = inb(iobase+FIR_LSR);
838 //self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM
839 eir = self->InterruptID & self->ier; /* Keep only the interrupts we are interested in */
840
841 IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __func__,self->InterruptID);
842 IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __func__,self->LineStatus);
843 IRDA_DEBUG(1, "%s(), self->ier = %x\n", __func__,self->ier);
844 IRDA_DEBUG(1, "%s(), eir = %x\n", __func__,eir);
845
846 /* Disable interrupts */
847 SetCOMInterrupts(self, FALSE);
848
849 /* Tx or Rx Interrupt */
850
851 if (eir & IIR_EOM)
852 {
853 if (self->io.direction == IO_XMIT) /* TX */
854 {
855 IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __func__);
856
857 if(ali_ircc_dma_xmit_complete(self))
858 {
859 if (irda_device_txqueue_empty(self->netdev))
860 {
861 /* Prepare for receive */
862 ali_ircc_dma_receive(self);
863 self->ier = IER_EOM;
864 }
865 }
866 else
867 {
868 self->ier = IER_EOM;
869 }
870
871 }
872 else /* RX */
873 {
874 IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __func__);
875
876 if(OldMessageCount > ((self->LineStatus+1) & 0x07))
877 {
878 self->rcvFramesOverflow = TRUE;
879 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__);
880 }
881
882 if (ali_ircc_dma_receive_complete(self))
883 {
884 IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__);
885
886 self->ier = IER_EOM;
887 }
888 else
889 {
890 IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__);
891
892 self->ier = IER_EOM | IER_TIMER;
893 }
894
895 }
896 }
897 /* Timer Interrupt */
898 else if (eir & IIR_TIMER)
899 {
900 if(OldMessageCount > ((self->LineStatus+1) & 0x07))
901 {
902 self->rcvFramesOverflow = TRUE;
903 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE *******\n", __func__);
904 }
905 /* Disable Timer */
906 switch_bank(iobase, BANK1);
907 tmp = inb(iobase+FIR_CR);
908 outb( tmp& ~CR_TIMER_EN, iobase+FIR_CR);
909
910 /* Check if this is a Tx timer interrupt */
911 if (self->io.direction == IO_XMIT)
912 {
913 ali_ircc_dma_xmit(self);
914
915 /* Interrupt on EOM */
916 self->ier = IER_EOM;
917
918 }
919 else /* Rx */
920 {
921 if(ali_ircc_dma_receive_complete(self))
922 {
923 self->ier = IER_EOM;
924 }
925 else
926 {
927 self->ier = IER_EOM | IER_TIMER;
928 }
929 }
930 }
931
932 /* Restore Interrupt */
933 SetCOMInterrupts(self, TRUE);
934
935 IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __func__);
936 return IRQ_RETVAL(eir);
937 }
938
939 /*
940 * Function ali_ircc_sir_interrupt (irq, self, eir)
941 *
942 * Handle SIR interrupt
943 *
944 */
945 static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
946 {
947 int iobase;
948 int iir, lsr;
949
950 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
951
952 iobase = self->io.sir_base;
953
954 iir = inb(iobase+UART_IIR) & UART_IIR_ID;
955 if (iir) {
956 /* Clear interrupt */
957 lsr = inb(iobase+UART_LSR);
958
959 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __func__,
960 iir, lsr, iobase);
961
962 switch (iir)
963 {
964 case UART_IIR_RLSI:
965 IRDA_DEBUG(2, "%s(), RLSI\n", __func__);
966 break;
967 case UART_IIR_RDI:
968 /* Receive interrupt */
969 ali_ircc_sir_receive(self);
970 break;
971 case UART_IIR_THRI:
972 if (lsr & UART_LSR_THRE)
973 {
974 /* Transmitter ready for data */
975 ali_ircc_sir_write_wakeup(self);
976 }
977 break;
978 default:
979 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __func__, iir);
980 break;
981 }
982
983 }
984
985
986 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
987
988 return IRQ_RETVAL(iir);
989 }
990
991
992 /*
993 * Function ali_ircc_sir_receive (self)
994 *
995 * Receive one frame from the infrared port
996 *
997 */
998 static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
999 {
1000 int boguscount = 0;
1001 int iobase;
1002
1003 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
1004 IRDA_ASSERT(self != NULL, return;);
1005
1006 iobase = self->io.sir_base;
1007
1008 /*
1009 * Receive all characters in Rx FIFO, unwrap and unstuff them.
1010 * async_unwrap_char will deliver all found frames
1011 */
1012 do {
1013 async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
1014 inb(iobase+UART_RX));
1015
1016 /* Make sure we don't stay here too long */
1017 if (boguscount++ > 32) {
1018 IRDA_DEBUG(2,"%s(), breaking!\n", __func__);
1019 break;
1020 }
1021 } while (inb(iobase+UART_LSR) & UART_LSR_DR);
1022
1023 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
1024 }
1025
1026 /*
1027 * Function ali_ircc_sir_write_wakeup (tty)
1028 *
1029 * Called by the driver when there's room for more data. If we have
1030 * more packets to send, we send them here.
1031 *
1032 */
1033 static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
1034 {
1035 int actual = 0;
1036 int iobase;
1037
1038 IRDA_ASSERT(self != NULL, return;);
1039
1040 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
1041
1042 iobase = self->io.sir_base;
1043
1044 /* Finished with frame? */
1045 if (self->tx_buff.len > 0)
1046 {
1047 /* Write data left in transmit buffer */
1048 actual = ali_ircc_sir_write(iobase, self->io.fifo_size,
1049 self->tx_buff.data, self->tx_buff.len);
1050 self->tx_buff.data += actual;
1051 self->tx_buff.len -= actual;
1052 }
1053 else
1054 {
1055 if (self->new_speed)
1056 {
1057 /* We must wait until all data are gone */
1058 while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT))
1059 IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __func__ );
1060
1061 IRDA_DEBUG(1, "%s(), Changing speed! self->new_speed = %d\n", __func__ , self->new_speed);
1062 ali_ircc_change_speed(self, self->new_speed);
1063 self->new_speed = 0;
1064
1065 // benjamin 2000/11/10 06:32PM
1066 if (self->io.speed > 115200)
1067 {
1068 IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ );
1069
1070 self->ier = IER_EOM;
1071 // SetCOMInterrupts(self, TRUE);
1072 return;
1073 }
1074 }
1075 else
1076 {
1077 netif_wake_queue(self->netdev);
1078 }
1079
1080 self->netdev->stats.tx_packets++;
1081
1082 /* Turn on receive interrupts */
1083 outb(UART_IER_RDI, iobase+UART_IER);
1084 }
1085
1086 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
1087 }
1088
1089 static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
1090 {
1091 struct net_device *dev = self->netdev;
1092 int iobase;
1093
1094 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
1095
1096 IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud);
1097
1098 /* This function *must* be called with irq off and spin-lock.
1099 * - Jean II */
1100
1101 iobase = self->io.fir_base;
1102
1103 SetCOMInterrupts(self, FALSE); // 2000/11/24 11:43AM
1104
1105 /* Go to MIR, FIR Speed */
1106 if (baud > 115200)
1107 {
1108
1109
1110 ali_ircc_fir_change_speed(self, baud);
1111
1112 /* Install FIR xmit handler*/
1113 dev->netdev_ops = &ali_ircc_fir_ops;
1114
1115 /* Enable Interrupt */
1116 self->ier = IER_EOM; // benjamin 2000/11/20 07:24PM
1117
1118 /* Be ready for incoming frames */
1119 ali_ircc_dma_receive(self); // benjamin 2000/11/8 07:46PM not complete
1120 }
1121 /* Go to SIR Speed */
1122 else
1123 {
1124 ali_ircc_sir_change_speed(self, baud);
1125
1126 /* Install SIR xmit handler*/
1127 dev->netdev_ops = &ali_ircc_sir_ops;
1128 }
1129
1130
1131 SetCOMInterrupts(self, TRUE); // 2000/11/24 11:43AM
1132
1133 netif_wake_queue(self->netdev);
1134
1135 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
1136 }
1137
1138 static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
1139 {
1140
1141 int iobase;
1142 struct ali_ircc_cb *self = priv;
1143 struct net_device *dev;
1144
1145 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
1146
1147 IRDA_ASSERT(self != NULL, return;);
1148
1149 dev = self->netdev;
1150 iobase = self->io.fir_base;
1151
1152 IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __func__ ,self->io.speed,baud);
1153
1154 /* Come from SIR speed */
1155 if(self->io.speed <=115200)
1156 {
1157 SIR2FIR(iobase);
1158 }
1159
1160 /* Update accounting for new speed */
1161 self->io.speed = baud;
1162
1163 // Set Dongle Speed mode
1164 ali_ircc_change_dongle_speed(self, baud);
1165
1166 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1167 }
1168
1169 /*
1170 * Function ali_ircc_sir_change_speed (self, speed)
1171 *
1172 * Set speed of IrDA port to specified baudrate
1173 *
1174 */
1175 static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1176 {
1177 struct ali_ircc_cb *self = priv;
1178 unsigned long flags;
1179 int iobase;
1180 int fcr; /* FIFO control reg */
1181 int lcr; /* Line control reg */
1182 int divisor;
1183
1184 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
1185
1186 IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __func__ , speed);
1187
1188 IRDA_ASSERT(self != NULL, return;);
1189
1190 iobase = self->io.sir_base;
1191
1192 /* Come from MIR or FIR speed */
1193 if(self->io.speed >115200)
1194 {
1195 // Set Dongle Speed mode first
1196 ali_ircc_change_dongle_speed(self, speed);
1197
1198 FIR2SIR(iobase);
1199 }
1200
1201 // Clear Line and Auxiliary status registers 2000/11/24 11:47AM
1202
1203 inb(iobase+UART_LSR);
1204 inb(iobase+UART_SCR);
1205
1206 /* Update accounting for new speed */
1207 self->io.speed = speed;
1208
1209 spin_lock_irqsave(&self->lock, flags);
1210
1211 divisor = 115200/speed;
1212
1213 fcr = UART_FCR_ENABLE_FIFO;
1214
1215 /*
1216 * Use trigger level 1 to avoid a 3 ms timeout delay at 9600 bps, and
1217 * almost 1.7 ms at 19200 bps. At speeds above that we can just forget
1218 * about this timeout since it will always be fast enough.
1219 */
1220 if (self->io.speed < 38400)
1221 fcr |= UART_FCR_TRIGGER_1;
1222 else
1223 fcr |= UART_FCR_TRIGGER_14;
1224
1225 /* IrDA ports use 8N1 */
1226 lcr = UART_LCR_WLEN8;
1227
1228 outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */
1229 outb(divisor & 0xff, iobase+UART_DLL); /* Set speed */
1230 outb(divisor >> 8, iobase+UART_DLM);
1231 outb(lcr, iobase+UART_LCR); /* Set 8N1 */
1232 outb(fcr, iobase+UART_FCR); /* Enable FIFO's */
1233
1234 /* Without this, the connection will be broken after coming back from FIR speed,
1235 but with this, the SIR connection is harder to establish */
1236 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
1237
1238 spin_unlock_irqrestore(&self->lock, flags);
1239
1240 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1241 }
1242
1243 static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
1244 {
1245
1246 struct ali_ircc_cb *self = priv;
1247 int iobase,dongle_id;
1248 int tmp = 0;
1249
1250 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
1251
1252 iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */
1253 dongle_id = self->io.dongle_id;
1254
1255 /* We are already locked, no need to do it again */
1256
1257 IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __func__ , dongle_types[dongle_id], speed);
1258
1259 switch_bank(iobase, BANK2);
1260 tmp = inb(iobase+FIR_IRDA_CR);
1261
1262 /* IBM type dongle */
1263 if(dongle_id == 0)
1264 {
1265 if(speed == 4000000)
1266 {
1267 // __ __
1268 // SD/MODE __| |__ __
1269 // __ __
1270 // IRTX __ __| |__
1271 // T1 T2 T3 T4 T5
1272
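/* NOTE (annotation, not in the original source): from the writes below it
 * appears that FIR_IRDA_CR bit 3 drives SD/MODE, bit 0 drives IRTX, and bit 1
 * is held set for the duration of the sequence and cleared again at the final
 * "Normal TX output" step; this bit mapping is inferred, not documented. */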
1273 tmp &= ~IRDA_CR_HDLC; // HDLC=0
1274 tmp |= IRDA_CR_CRC; // CRC=1
1275
1276 switch_bank(iobase, BANK2);
1277 outb(tmp, iobase+FIR_IRDA_CR);
1278
1279 // T1 -> SD/MODE:0 IRTX:0
1280 tmp &= ~0x09;
1281 tmp |= 0x02;
1282 outb(tmp, iobase+FIR_IRDA_CR);
1283 udelay(2);
1284
1285 // T2 -> SD/MODE:1 IRTX:0
1286 tmp &= ~0x01;
1287 tmp |= 0x0a;
1288 outb(tmp, iobase+FIR_IRDA_CR);
1289 udelay(2);
1290
1291 // T3 -> SD/MODE:1 IRTX:1
1292 tmp |= 0x0b;
1293 outb(tmp, iobase+FIR_IRDA_CR);
1294 udelay(2);
1295
1296 // T4 -> SD/MODE:0 IRTX:1
1297 tmp &= ~0x08;
1298 tmp |= 0x03;
1299 outb(tmp, iobase+FIR_IRDA_CR);
1300 udelay(2);
1301
1302 // T5 -> SD/MODE:0 IRTX:0
1303 tmp &= ~0x09;
1304 tmp |= 0x02;
1305 outb(tmp, iobase+FIR_IRDA_CR);
1306 udelay(2);
1307
1308 // reset -> Normal TX output Signal
1309 outb(tmp & ~0x02, iobase+FIR_IRDA_CR);
1310 }
1311 else /* speed <=1152000 */
1312 {
1313 // __
1314 // SD/MODE __| |__
1315 //
1316 // IRTX ________
1317 // T1 T2 T3
1318
1319 /* MIR 1.152 Mbps or 0.576 Mbps */
1320 if (speed==1152000)
1321 {
1322 tmp |= 0xA0; //HDLC=1, 1.152Mbps=1
1323 }
1324 else
1325 {
1326 tmp &=~0x80; //HDLC 0.576Mbps
1327 tmp |= 0x20; //HDLC=1,
1328 }
1329
1330 tmp |= IRDA_CR_CRC; // CRC=1
1331
1332 switch_bank(iobase, BANK2);
1333 outb(tmp, iobase+FIR_IRDA_CR);
1334
1335 /* MIR 1.152 Mbps or 0.576 Mbps */
1336
1337 //switch_bank(iobase, BANK2);
1338 // T1 -> SD/MODE:0 IRTX:0
1339 tmp &= ~0x09;
1340 tmp |= 0x02;
1341 outb(tmp, iobase+FIR_IRDA_CR);
1342 udelay(2);
1343
1344 // T2 -> SD/MODE:1 IRTX:0
1345 tmp &= ~0x01;
1346 tmp |= 0x0a;
1347 outb(tmp, iobase+FIR_IRDA_CR);
1348
1349 // T3 -> SD/MODE:0 IRTX:0
1350 tmp &= ~0x09;
1351 tmp |= 0x02;
1352 outb(tmp, iobase+FIR_IRDA_CR);
1353 udelay(2);
1354
1355 // reset -> Normal TX output Signal
1356 outb(tmp & ~0x02, iobase+FIR_IRDA_CR);
1357 }
1358 }
1359 else if (dongle_id == 1) /* HP HDSL-3600 */
1360 {
1361 switch(speed)
1362 {
1363 case 4000000:
1364 tmp &= ~IRDA_CR_HDLC; // HDLC=0
1365 break;
1366
1367 case 1152000:
1368 tmp |= 0xA0; // HDLC=1, 1.152Mbps=1
1369 break;
1370
1371 case 576000:
1372 tmp &=~0x80; // HDLC 0.576Mbps
1373 tmp |= 0x20; // HDLC=1,
1374 break;
1375 }
1376
1377 tmp |= IRDA_CR_CRC; // CRC=1
1378
1379 switch_bank(iobase, BANK2);
1380 outb(tmp, iobase+FIR_IRDA_CR);
1381 }
1382 else /* HP HDSL-1100 */
1383 {
1384 if(speed <= 115200) /* SIR */
1385 {
1386
1387 tmp &= ~IRDA_CR_FIR_SIN; // HP sin select = 0
1388
1389 switch_bank(iobase, BANK2);
1390 outb(tmp, iobase+FIR_IRDA_CR);
1391 }
1392 else /* MIR FIR */
1393 {
1394
1395 switch(speed)
1396 {
1397 case 4000000:
1398 tmp &= ~IRDA_CR_HDLC; // HDLC=0
1399 break;
1400
1401 case 1152000:
1402 tmp |= 0xA0; // HDLC=1, 1.152Mbps=1
1403 break;
1404
1405 case 576000:
1406 tmp &=~0x80; // HDLC 0.576Mbps
1407 tmp |= 0x20; // HDLC=1,
1408 break;
1409 }
1410
1411 tmp |= IRDA_CR_CRC; // CRC=1
1412 tmp |= IRDA_CR_FIR_SIN; // HP sin select = 1
1413
1414 switch_bank(iobase, BANK2);
1415 outb(tmp, iobase+FIR_IRDA_CR);
1416 }
1417 }
1418
1419 switch_bank(iobase, BANK0);
1420
1421 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1422 }
1423
1424 /*
1425 * Function ali_ircc_sir_write (iobase, fifo_size, buf, len)
1426 *
1427 * Fill Tx FIFO with transmit data
1428 *
1429 */
1430 static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
1431 {
1432 int actual = 0;
1433
1434 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
1435
1436 /* Tx FIFO should be empty! */
1437 if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
1438 IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __func__ );
1439 return 0;
1440 }
1441
1442 /* Fill FIFO with current frame */
1443 while ((fifo_size-- > 0) && (actual < len)) {
1444 /* Transmit next byte */
1445 outb(buf[actual], iobase+UART_TX);
1446
1447 actual++;
1448 }
1449
1450 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
1451 return actual;
1452 }
1453
1454 /*
1455 * Function ali_ircc_net_open (dev)
1456 *
1457 * Start the device
1458 *
1459 */
1460 static int ali_ircc_net_open(struct net_device *dev)
1461 {
1462 struct ali_ircc_cb *self;
1463 int iobase;
1464 char hwname[32];
1465
1466 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
1467
1468 IRDA_ASSERT(dev != NULL, return -1;);
1469
1470 self = netdev_priv(dev);
1471
1472 IRDA_ASSERT(self != NULL, return 0;);
1473
1474 iobase = self->io.fir_base;
1475
1476 /* Request IRQ and install Interrupt Handler */
1477 if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev))
1478 {
1479 IRDA_WARNING("%s, unable to allocate irq=%d\n",
1480 ALI_IRCC_DRIVER_NAME,
1481 self->io.irq);
1482 return -EAGAIN;
1483 }
1484
1485 /*
1486 * Always allocate the DMA channel after the IRQ, and clean up on
1487 * failure.
1488 */
1489 if (request_dma(self->io.dma, dev->name)) {
1490 IRDA_WARNING("%s, unable to allocate dma=%d\n",
1491 ALI_IRCC_DRIVER_NAME,
1492 self->io.dma);
1493 free_irq(self->io.irq, dev);
1494 return -EAGAIN;
1495 }
1496
1497 /* Turn on interrupts */
1498 outb(UART_IER_RDI , iobase+UART_IER);
1499
1500 /* Ready to play! */
1501 netif_start_queue(dev); //benjamin by irport
1502
1503 /* Give self a hardware name */
1504 sprintf(hwname, "ALI-FIR @ 0x%03x", self->io.fir_base);
1505
1506 /*
1507 * Open new IrLAP layer instance, now that everything should be
1508 * initialized properly
1509 */
1510 self->irlap = irlap_open(dev, &self->qos, hwname);
1511
1512 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
1513
1514 return 0;
1515 }
1516
1517 /*
1518 * Function ali_ircc_net_close (dev)
1519 *
1520 * Stop the device
1521 *
1522 */
1523 static int ali_ircc_net_close(struct net_device *dev)
1524 {
1525
1526 struct ali_ircc_cb *self;
1527 //int iobase;
1528
1529 IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__ );
1530
1531 IRDA_ASSERT(dev != NULL, return -1;);
1532
1533 self = netdev_priv(dev);
1534 IRDA_ASSERT(self != NULL, return 0;);
1535
1536 /* Stop device */
1537 netif_stop_queue(dev);
1538
1539 /* Stop and remove instance of IrLAP */
1540 if (self->irlap)
1541 irlap_close(self->irlap);
1542 self->irlap = NULL;
1543
1544 disable_dma(self->io.dma);
1545
1546 /* Disable interrupts */
1547 SetCOMInterrupts(self, FALSE);
1548
1549 free_irq(self->io.irq, dev);
1550 free_dma(self->io.dma);
1551
1552 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
1553
1554 return 0;
1555 }
1556
1557 /*
1558 * Function ali_ircc_fir_hard_xmit (skb, dev)
1559 *
1560 * Transmit the frame
1561 *
1562 */
1563 static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
1564 struct net_device *dev)
1565 {
1566 struct ali_ircc_cb *self;
1567 unsigned long flags;
1568 int iobase;
1569 __u32 speed;
1570 int mtt, diff;
1571
1572 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1573
1574 self = netdev_priv(dev);
1575 iobase = self->io.fir_base;
1576
1577 netif_stop_queue(dev);
1578
1579 /* Make sure tests & speed changes are atomic */
1580 spin_lock_irqsave(&self->lock, flags);
1581
1582 /* Note : you should make sure that speed changes are not going
1583 * to corrupt any outgoing frame. Look at nsc-ircc for the gory
1584 * details - Jean II */
1585
1586 /* Check if we need to change the speed */
1587 speed = irda_get_next_speed(skb);
1588 if ((speed != self->io.speed) && (speed != -1)) {
1589 /* Check for empty frame */
1590 if (!skb->len) {
1591 ali_ircc_change_speed(self, speed);
1592 dev->trans_start = jiffies;
1593 spin_unlock_irqrestore(&self->lock, flags);
1594 dev_kfree_skb(skb);
1595 return NETDEV_TX_OK;
1596 } else
1597 self->new_speed = speed;
1598 }
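/* NOTE (annotation, not in the original source): for a non-empty frame the
 * speed change is only recorded in self->new_speed here; it is applied in
 * ali_ircc_dma_xmit_complete() once the current frame has been sent. */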
1599
1600 /* Register and copy this frame to DMA memory */
1601 self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
1602 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
1603 self->tx_fifo.tail += skb->len;
1604
1605 dev->stats.tx_bytes += skb->len;
1606
1607 skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
1608 skb->len);
1609 self->tx_fifo.len++;
1610 self->tx_fifo.free++;
1611
1612 /* Start transmit only if there is currently no transmit going on */
1613 if (self->tx_fifo.len == 1)
1614 {
1615 /* Check if we must wait the min turn time or not */
1616 mtt = irda_get_mtt(skb);
1617
1618 if (mtt)
1619 {
1620 /* Check how much time we have used already */
1621 do_gettimeofday(&self->now);
1622
1623 diff = self->now.tv_usec - self->stamp.tv_usec;
1624 /* self->stamp is set from ali_ircc_dma_receive_complete() */
1625
1626 IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff);
1627
1628 if (diff < 0)
1629 diff += 1000000;
1630
1631 /* Check if the mtt is larger than the time we have
1632 * already used by all the protocol processing
1633 */
1634 if (mtt > diff)
1635 {
1636 mtt -= diff;
1637
1638 /*
1639 * Use the timer if the delay is larger than 500 us, and
1640 * use udelay() for smaller values, which should
1641 * be acceptable
1642 */
1643 if (mtt > 500)
1644 {
1645 /* Adjust for timer resolution */
1646 mtt = (mtt+250) / 500; /* Round to the nearest 500 us timer unit */
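/* NOTE (annotation, not in the original source): e.g. an mtt of 1200 us gives
 * (1200+250)/500 = 2 and selects the 1 ms timer below, while 600 us gives 1
 * and selects the 500 us timer. */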
1647
1648 IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __func__ , mtt);
1649
1650 /* Setup timer */
1651 if (mtt == 1) /* 500 us */
1652 {
1653 switch_bank(iobase, BANK1);
1654 outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR);
1655 }
1656 else if (mtt == 2) /* 1 ms */
1657 {
1658 switch_bank(iobase, BANK1);
1659 outb(TIMER_IIR_1ms, iobase+FIR_TIMER_IIR);
1660 }
1661 else /* > 2ms -> 4ms */
1662 {
1663 switch_bank(iobase, BANK1);
1664 outb(TIMER_IIR_2ms, iobase+FIR_TIMER_IIR);
1665 }
1666
1667
1668 /* Start timer */
1669 outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);
1670 self->io.direction = IO_XMIT;
1671
1672 /* Enable timer interrupt */
1673 self->ier = IER_TIMER;
1674 SetCOMInterrupts(self, TRUE);
1675
1676 /* Timer will take care of the rest */
1677 goto out;
1678 }
1679 else
1680 udelay(mtt);
1681 } // if (mtt > diff)
1682 } // if (mtt)
1683
1684 /* Enable EOM interrupt */
1685 self->ier = IER_EOM;
1686 SetCOMInterrupts(self, TRUE);
1687
1688 /* Transmit frame */
1689 ali_ircc_dma_xmit(self);
1690 } // if (self->tx_fifo.len == 1)
1691
1692 out:
1693
1694 /* Not busy transmitting anymore if window is not full */
1695 if (self->tx_fifo.free < MAX_TX_WINDOW)
1696 netif_wake_queue(self->netdev);
1697
1698 /* Restore bank register */
1699 switch_bank(iobase, BANK0);
1700
1701 dev->trans_start = jiffies;
1702 spin_unlock_irqrestore(&self->lock, flags);
1703 dev_kfree_skb(skb);
1704
1705 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1706 return NETDEV_TX_OK;
1707 }
1708
1709
1710 static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
1711 {
1712 int iobase, tmp;
1713 unsigned char FIFO_OPTI, Hi, Lo;
1714
1715
1716 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1717
1718 iobase = self->io.fir_base;
1719
1720 /* FIFO threshold, this method comes from the NDIS5 code */
1721
1722 if(self->tx_fifo.queue[self->tx_fifo.ptr].len < TX_FIFO_Threshold)
1723 FIFO_OPTI = self->tx_fifo.queue[self->tx_fifo.ptr].len-1;
1724 else
1725 FIFO_OPTI = TX_FIFO_Threshold;
1726
1727 /* Disable DMA */
1728 switch_bank(iobase, BANK1);
1729 outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
1730
1731 self->io.direction = IO_XMIT;
1732
1733 irda_setup_dma(self->io.dma,
1734 ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
1735 self->tx_buff.head) + self->tx_buff_dma,
1736 self->tx_fifo.queue[self->tx_fifo.ptr].len,
1737 DMA_TX_MODE);
1738
1739 /* Reset Tx FIFO */
1740 switch_bank(iobase, BANK0);
1741 outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);
1742
1743 /* Set Tx FIFO threshold */
1744 if (self->fifo_opti_buf!=FIFO_OPTI)
1745 {
1746 switch_bank(iobase, BANK1);
1747 outb(FIFO_OPTI, iobase+FIR_FIFO_TR) ;
1748 self->fifo_opti_buf=FIFO_OPTI;
1749 }
1750
1751 /* Set Tx DMA threshold */
1752 switch_bank(iobase, BANK1);
1753 outb(TX_DMA_Threshold, iobase+FIR_DMA_TR);
1754
1755 /* Set max Tx frame size */
1756 Hi = (self->tx_fifo.queue[self->tx_fifo.ptr].len >> 8) & 0x0f;
1757 Lo = self->tx_fifo.queue[self->tx_fifo.ptr].len & 0xff;
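/* NOTE (annotation, not in the original source): the 12-bit frame length is
 * split into its upper four bits (written to FIR_TX_DSR_HI) and its lower
 * eight bits (written to FIR_TX_DSR_LO) below. */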
1758 switch_bank(iobase, BANK2);
1759 outb(Hi, iobase+FIR_TX_DSR_HI);
1760 outb(Lo, iobase+FIR_TX_DSR_LO);
1761
1762 /* Disable SIP, disable Brick Wall (not supported in TX mode), change to TX mode */
1763 switch_bank(iobase, BANK0);
1764 tmp = inb(iobase+FIR_LCR_B);
1765 tmp &= ~0x20; // Disable SIP
1766 outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
1767 IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
1768
1769 outb(0, iobase+FIR_LSR);
1770
1771 /* Enable DMA and Burst Mode */
1772 switch_bank(iobase, BANK1);
1773 outb(inb(iobase+FIR_CR) | CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
1774
1775 switch_bank(iobase, BANK0);
1776
1777 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1778 }
1779
1780 static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
1781 {
1782 int iobase;
1783 int ret = TRUE;
1784
1785 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1786
1787 iobase = self->io.fir_base;
1788
1789 /* Disable DMA */
1790 switch_bank(iobase, BANK1);
1791 outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
1792
1793 /* Check for underrun! */
1794 switch_bank(iobase, BANK0);
1795 if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT)
1796
1797 {
1798 IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__);
1799 self->netdev->stats.tx_errors++;
1800 self->netdev->stats.tx_fifo_errors++;
1801 }
1802 else
1803 {
1804 self->netdev->stats.tx_packets++;
1805 }
1806
1807 /* Check if we need to change the speed */
1808 if (self->new_speed)
1809 {
1810 ali_ircc_change_speed(self, self->new_speed);
1811 self->new_speed = 0;
1812 }
1813
1814 /* Finished with this frame, so prepare for next */
1815 self->tx_fifo.ptr++;
1816 self->tx_fifo.len--;
1817
1818 /* Any frames to be sent back-to-back? */
1819 if (self->tx_fifo.len)
1820 {
1821 ali_ircc_dma_xmit(self);
1822
1823 /* Not finished yet! */
1824 ret = FALSE;
1825 }
1826 else
1827 { /* Reset Tx FIFO info */
1828 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
1829 self->tx_fifo.tail = self->tx_buff.head;
1830 }
1831
1832 /* Make sure we have room for more frames */
1833 if (self->tx_fifo.free < MAX_TX_WINDOW) {
1834 /* Not busy transmitting anymore */
1835 /* Tell the network layer, that we can accept more frames */
1836 netif_wake_queue(self->netdev);
1837 }
1838
1839 switch_bank(iobase, BANK0);
1840
1841 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1842 return ret;
1843 }
1844
1845 /*
1846 * Function ali_ircc_dma_receive (self)
1847 *
1848 * Get ready for receiving a frame. The device will initiate a DMA
1849 * if it starts to receive a frame.
1850 *
1851 */
1852 static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
1853 {
1854 int iobase, tmp;
1855
1856 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1857
1858 iobase = self->io.fir_base;
1859
1860 /* Reset Tx FIFO info */
1861 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
1862 self->tx_fifo.tail = self->tx_buff.head;
1863
1864 /* Disable DMA */
1865 switch_bank(iobase, BANK1);
1866 outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
1867
1868 /* Reset Message Count */
1869 switch_bank(iobase, BANK0);
1870 outb(0x07, iobase+FIR_LSR);
1871
1872 self->rcvFramesOverflow = FALSE;
1873
1874 self->LineStatus = inb(iobase+FIR_LSR) ;
1875
1876 /* Reset Rx FIFO info */
1877 self->io.direction = IO_RECV;
1878 self->rx_buff.data = self->rx_buff.head;
1879
1880 /* Reset Rx FIFO */
1881 // switch_bank(iobase, BANK0);
1882 outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);
1883
1884 self->st_fifo.len = self->st_fifo.pending_bytes = 0;
1885 self->st_fifo.tail = self->st_fifo.head = 0;
1886
1887 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
1888 DMA_RX_MODE);
1889
1890 /* Set Receive Mode, Brick Wall */
1891 //switch_bank(iobase, BANK0);
1892 tmp = inb(iobase+FIR_LCR_B);
1893 outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
1894 IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
1895
1896 /* Set Rx Threshold */
1897 switch_bank(iobase, BANK1);
1898 outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
1899 outb(RX_DMA_Threshold, iobase+FIR_DMA_TR);
1900
1901 /* Enable DMA and Burst Mode */
1902 // switch_bank(iobase, BANK1);
1903 outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
1904
1905 switch_bank(iobase, BANK0);
1906 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1907 return 0;
1908 }
1909
1910 static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1911 {
1912 struct st_fifo *st_fifo;
1913 struct sk_buff *skb;
1914 __u8 status, MessageCount;
1915 int len, i, iobase, val;
1916
1917 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1918
1919 st_fifo = &self->st_fifo;
1920 iobase = self->io.fir_base;
1921
1922 switch_bank(iobase, BANK0);
1923 MessageCount = inb(iobase+ FIR_LSR)&0x07;
1924
1925 if (MessageCount > 0)
1926 IRDA_DEBUG(0, "%s(), Message count = %d,\n", __func__ , MessageCount);
1927
1928 for (i=0; i<=MessageCount; i++)
1929 {
1930 /* Bank 0 */
1931 switch_bank(iobase, BANK0);
1932 status = inb(iobase+FIR_LSR);
1933
1934 switch_bank(iobase, BANK2);
1935 len = inb(iobase+FIR_RX_DSR_HI) & 0x0f;
1936 len = len << 8;
1937 len |= inb(iobase+FIR_RX_DSR_LO);
1938
1939 IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __func__ , len);
1940 IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __func__ , status);
1941
1942 if (st_fifo->tail >= MAX_RX_WINDOW) {
1943 IRDA_DEBUG(0, "%s(), window is full!\n", __func__ );
1944 continue;
1945 }
1946
1947 st_fifo->entries[st_fifo->tail].status = status;
1948 st_fifo->entries[st_fifo->tail].len = len;
1949 st_fifo->pending_bytes += len;
1950 st_fifo->tail++;
1951 st_fifo->len++;
1952 }
1953
1954 for (i=0; i<=MessageCount; i++)
1955 {
1956 /* Get first entry */
1957 status = st_fifo->entries[st_fifo->head].status;
1958 len = st_fifo->entries[st_fifo->head].len;
1959 st_fifo->pending_bytes -= len;
1960 st_fifo->head++;
1961 st_fifo->len--;
1962
1963 /* Check for errors */
1964 if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
1965 {
1966 IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ );
1967
1968 /* Skip frame */
1969 self->netdev->stats.rx_errors++;
1970
1971 self->rx_buff.data += len;
1972
1973 if (status & LSR_FIFO_UR)
1974 {
1975 self->netdev->stats.rx_frame_errors++;
1976 IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ );
1977 }
1978 if (status & LSR_FRAME_ERROR)
1979 {
1980 self->netdev->stats.rx_frame_errors++;
1981 IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ );
1982 }
1983
1984 if (status & LSR_CRC_ERROR)
1985 {
1986 self->netdev->stats.rx_crc_errors++;
1987 IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ );
1988 }
1989
1990 if(self->rcvFramesOverflow)
1991 {
1992 self->netdev->stats.rx_frame_errors++;
1993 IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ );
1994 }
1995 if(len == 0)
1996 {
1997 self->netdev->stats.rx_frame_errors++;
1998 IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 *********\n", __func__ );
1999 }
2000 }
2001 else
2002 {
2003
2004 if (st_fifo->pending_bytes < 32)
2005 {
2006 switch_bank(iobase, BANK0);
2007 val = inb(iobase+FIR_BSR);
2008 if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
2009 {
2010 IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ );
2011
2012 /* Put this entry back in fifo */
2013 st_fifo->head--;
2014 st_fifo->len++;
2015 st_fifo->pending_bytes += len;
2016 st_fifo->entries[st_fifo->head].status = status;
2017 st_fifo->entries[st_fifo->head].len = len;
2018
2019 /*
2020 * DMA not finished yet, so try again
2021 * later, set timer value, resolution
2022 * 500 us
2023 */
2024
2025 switch_bank(iobase, BANK1);
2026 outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR); // 2001/1/2 05:07PM
2027
2028 /* Enable Timer */
2029 outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);
2030
2031 return FALSE; /* I'll be back! */
2032 }
2033 }
2034
2035 /*
2036 * Remember the time we received this frame, so we can
2037 * reduce the min turn time a bit since we will know
2038 * how much time we have used for protocol processing
2039 */
2040 do_gettimeofday(&self->stamp);
2041
2042 skb = dev_alloc_skb(len+1);
2043 if (skb == NULL)
2044 {
2045 IRDA_WARNING("%s(), memory squeeze, "
2046 "dropping frame.\n",
2047 __func__);
2048 self->netdev->stats.rx_dropped++;
2049
2050 return FALSE;
2051 }
2052
2053 /* Make sure IP header gets aligned */
2054 skb_reserve(skb, 1);
2055
2056 /* Copy frame without CRC, CRC is removed by hardware */
2057 skb_put(skb, len);
2058 skb_copy_to_linear_data(skb, self->rx_buff.data, len);
2059
2060 /* Move to next frame */
2061 self->rx_buff.data += len;
2062 self->netdev->stats.rx_bytes += len;
2063 self->netdev->stats.rx_packets++;
2064
2065 skb->dev = self->netdev;
2066 skb_reset_mac_header(skb);
2067 skb->protocol = htons(ETH_P_IRDA);
2068 netif_rx(skb);
2069 }
2070 }
2071
2072 switch_bank(iobase, BANK0);
2073
2074 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
2075 return TRUE;
2076 }
2077
2078
2079
2080 /*
2081 * Function ali_ircc_sir_hard_xmit (skb, dev)
2082 *
2083 * Transmit the frame!
2084 *
2085 */
2086 static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
2087 struct net_device *dev)
2088 {
2089 struct ali_ircc_cb *self;
2090 unsigned long flags;
2091 int iobase;
2092 __u32 speed;
2093
2094 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
2095
2096 IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
2097
2098 self = netdev_priv(dev);
2099 IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
2100
2101 iobase = self->io.sir_base;
2102
2103 netif_stop_queue(dev);
2104
2105 /* Make sure tests & speed changes are atomic */
2106 spin_lock_irqsave(&self->lock, flags);
2107
2108 /* Note : you should make sure that speed changes are not going
2109 * to corrupt any outgoing frame. Look at nsc-ircc for the gory
2110 * details - Jean II */
2111
2112 /* Check if we need to change the speed */
2113 speed = irda_get_next_speed(skb);
2114 if ((speed != self->io.speed) && (speed != -1)) {
2115 /* Check for empty frame */
2116 if (!skb->len) {
2117 ali_ircc_change_speed(self, speed);
2118 dev->trans_start = jiffies;
2119 spin_unlock_irqrestore(&self->lock, flags);
2120 dev_kfree_skb(skb);
2121 return NETDEV_TX_OK;
2122 } else
2123 self->new_speed = speed;
2124 }
2125
2126 /* Init tx buffer */
2127 self->tx_buff.data = self->tx_buff.head;
2128
2129 /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
2130 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
2131 self->tx_buff.truesize);
2132
2133 self->netdev->stats.tx_bytes += self->tx_buff.len;
2134
2135 /* Turn on transmit finished interrupt. Will fire immediately! */
2136 outb(UART_IER_THRI, iobase+UART_IER);
2137
2138 dev->trans_start = jiffies;
2139 spin_unlock_irqrestore(&self->lock, flags);
2140
2141 dev_kfree_skb(skb);
2142
2143 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
2144
2145 return NETDEV_TX_OK;
2146 }
2147
2148
2149 /*
2150 * Function ali_ircc_net_ioctl (dev, rq, cmd)
2151 *
2152 * Process IOCTL commands for this device
2153 *
2154 */
2155 static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2156 {
2157 struct if_irda_req *irq = (struct if_irda_req *) rq;
2158 struct ali_ircc_cb *self;
2159 unsigned long flags;
2160 int ret = 0;
2161
2162 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
2163
2164 IRDA_ASSERT(dev != NULL, return -1;);
2165
2166 self = netdev_priv(dev);
2167
2168 IRDA_ASSERT(self != NULL, return -1;);
2169
2170 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
2171
2172 switch (cmd) {
2173 case SIOCSBANDWIDTH: /* Set bandwidth */
2174 IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", __func__ );
2175 /*
2176 * This function will also be used by IrLAP to change the
2177 * speed, so we still must allow for speed change within
2178 * interrupt context.
2179 */
2180 if (!in_interrupt() && !capable(CAP_NET_ADMIN))
2181 return -EPERM;
2182
2183 spin_lock_irqsave(&self->lock, flags);
2184 ali_ircc_change_speed(self, irq->ifr_baudrate);
2185 spin_unlock_irqrestore(&self->lock, flags);
2186 break;
2187 case SIOCSMEDIABUSY: /* Set media busy */
2188 IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __func__ );
2189 if (!capable(CAP_NET_ADMIN))
2190 return -EPERM;
2191 irda_device_set_media_busy(self->netdev, TRUE);
2192 break;
2193 case SIOCGRECEIVING: /* Check if we are receiving right now */
2194 IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __func__ );
2195 /* This is protected */
2196 irq->ifr_receiving = ali_ircc_is_receiving(self);
2197 break;
2198 default:
2199 ret = -EOPNOTSUPP;
2200 }
2201
2202 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
2203
2204 return ret;
2205 }
2206
2207 /*
2208 * Function ali_ircc_is_receiving (self)
2209 *
2210 * Return TRUE if we are currently receiving a frame
2211 *
2212 */
2213 static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
2214 {
2215 unsigned long flags;
2216 int status = FALSE;
2217 int iobase;
2218
2219 IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __func__ );
2220
2221 IRDA_ASSERT(self != NULL, return FALSE;);
2222
2223 spin_lock_irqsave(&self->lock, flags);
2224
2225 if (self->io.speed > 115200)
2226 {
2227 iobase = self->io.fir_base;
2228
2229 switch_bank(iobase, BANK1);
2230 if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0)
2231 {
2232 /* We are receiving something */
2233 IRDA_DEBUG(1, "%s(), We are receiving something\n", __func__ );
2234 status = TRUE;
2235 }
2236 switch_bank(iobase, BANK0);
2237 }
2238 else
2239 {
2240 status = (self->rx_buff.state != OUTSIDE_FRAME);
2241 }
2242
2243 spin_unlock_irqrestore(&self->lock, flags);
2244
2245 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
2246
2247 return status;
2248 }
2249
2250 static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)
2251 {
2252 struct ali_ircc_cb *self = platform_get_drvdata(dev);
2253
2254 IRDA_MESSAGE("%s, Suspending\n", ALI_IRCC_DRIVER_NAME);
2255
2256 if (self->io.suspended)
2257 return 0;
2258
2259 ali_ircc_net_close(self->netdev);
2260
2261 self->io.suspended = 1;
2262
2263 return 0;
2264 }
2265
2266 static int ali_ircc_resume(struct platform_device *dev)
2267 {
2268 struct ali_ircc_cb *self = platform_get_drvdata(dev);
2269
2270 if (!self->io.suspended)
2271 return 0;
2272
2273 ali_ircc_net_open(self->netdev);
2274
2275 IRDA_MESSAGE("%s, Waking up\n", ALI_IRCC_DRIVER_NAME);
2276
2277 self->io.suspended = 0;
2278
2279 return 0;
2280 }
2281
2282 /* ALi Chip Function */
2283
2284 static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
2285 {
2286
2287 unsigned char newMask;
2288
2289 int iobase = self->io.fir_base; /* or sir_base */
2290
2291 IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __func__ , enable);
2292
2293 /* Enable the interrupt which we wish to */
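/* NOTE (annotation, not in the original source): at FIR/MIR speeds the mask
 * is taken from self->ier for both directions; only the SIR (UART) case below
 * differs between transmit and receive. */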
2294 if (enable){
2295 if (self->io.direction == IO_XMIT)
2296 {
2297 if (self->io.speed > 115200) /* FIR, MIR */
2298 {
2299 newMask = self->ier;
2300 }
2301 else /* SIR */
2302 {
2303 newMask = UART_IER_THRI | UART_IER_RDI;
2304 }
2305 }
2306 else {
2307 if (self->io.speed > 115200) /* FIR, MIR */
2308 {
2309 newMask = self->ier;
2310 }
2311 else /* SIR */
2312 {
2313 newMask = UART_IER_RDI;
2314 }
2315 }
2316 }
2317 else /* Disable all the interrupts */
2318 {
2319 newMask = 0x00;
2320
2321 }
2322
2323 // SIR and FIR have different registers
2324 if (self->io.speed > 115200)
2325 {
2326 switch_bank(iobase, BANK0);
2327 outb(newMask, iobase+FIR_IER);
2328 }
2329 else
2330 outb(newMask, iobase+UART_IER);
2331
2332 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
2333 }
2334
2335 static void SIR2FIR(int iobase)
2336 {
2337 //unsigned char tmp;
2338
2339 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
2340
2341 /* Already protected (change_speed() or setup()), no need to lock.
2342 * Jean II */
2343
2344 outb(0x28, iobase+UART_MCR);
2345 outb(0x68, iobase+UART_MCR);
2346 outb(0x88, iobase+UART_MCR);
2347
2348 outb(0x60, iobase+FIR_MCR); /* Master Reset */
2349 outb(0x20, iobase+FIR_MCR); /* Master Interrupt Enable */
2350
2351 //tmp = inb(iobase+FIR_LCR_B); /* SIP enable */
2352 //tmp |= 0x20;
2353 //outb(tmp, iobase+FIR_LCR_B);
2354
2355 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
2356 }
2357
2358 static void FIR2SIR(int iobase)
2359 {
2360 unsigned char val;
2361
2362 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
2363
2364 /* Already protected (change_speed() or setup()), no need to lock.
2365 * Jean II */
2366
2367 outb(0x20, iobase+FIR_MCR); /* IRQ to low */
2368 outb(0x00, iobase+UART_IER);
2369
2370 outb(0xA0, iobase+FIR_MCR); /* Don't set master reset */
2371 outb(0x00, iobase+UART_FCR);
2372 outb(0x07, iobase+UART_FCR);
2373
2374 val = inb(iobase+UART_RX);
2375 val = inb(iobase+UART_LSR);
2376 val = inb(iobase+UART_MSR);
2377
2378 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
2379 }
2380
2381 MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>");
2382 MODULE_DESCRIPTION("ALi FIR Controller Driver");
2383 MODULE_LICENSE("GPL");
2384 MODULE_ALIAS("platform:" ALI_IRCC_DRIVER_NAME);
2385
2386
2387 module_param_array(io, int, NULL, 0);
2388 MODULE_PARM_DESC(io, "Base I/O addresses");
2389 module_param_array(irq, int, NULL, 0);
2390 MODULE_PARM_DESC(irq, "IRQ lines");
2391 module_param_array(dma, int, NULL, 0);
2392 MODULE_PARM_DESC(dma, "DMA channels");
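/* NOTE (annotation, not in the original source): the three arrays can be set
 * at module load time, e.g. "modprobe ali-ircc io=0x3e8 irq=10 dma=1"; these
 * values are illustrative placeholders, not recommended settings. */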
2393
2394 module_init(ali_ircc_init);
2395 module_exit(ali_ircc_cleanup);
2396
2397
2398
2399
2400
2401 /* LDV_COMMENT_BEGIN_MAIN */
2402 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
2403
2404 /*###########################################################################*/
2405
2406 /*############## Driver Environment Generator 0.2 output ####################*/
2407
2408 /*###########################################################################*/
2409
2410
2411
2412 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by the driver before it is unloaded. */
2413 void ldv_check_final_state(void);
2414
2415 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
2416 void ldv_check_return_value(int res);
2417
2418 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
2419 void ldv_check_return_value_probe(int res);
2420
2421 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
2422 void ldv_initialize(void);
2423
2424 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
2425 void ldv_handler_precall(void);
2426
2427 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
2428 int nondet_int(void);
2429
2430 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
2431 int LDV_IN_INTERRUPT;
2432
2433 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
2434 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
2435
2436
2437
2438 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
2439 /*============================= VARIABLE DECLARATION PART =============================*/
2440 /** STRUCT: struct type: platform_driver, struct name: ali_ircc_driver **/
2441 /* content: static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)*/
2442 /* LDV_COMMENT_BEGIN_PREP */
2443 #define CHIP_IO_EXTENT 8
2444 #define BROKEN_DONGLE_ID
2445 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2446 /* LDV_COMMENT_END_PREP */
2447 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_suspend" */
2448 struct platform_device * var_group1;
2449 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_suspend" */
2450 pm_message_t var_ali_ircc_suspend_29_p1;
2451 /* content: static int ali_ircc_resume(struct platform_device *dev)*/
2452 /* LDV_COMMENT_BEGIN_PREP */
2453 #define CHIP_IO_EXTENT 8
2454 #define BROKEN_DONGLE_ID
2455 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2456 /* LDV_COMMENT_END_PREP */
2457
2458 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_sir_ops **/
2459 /* content: static int ali_ircc_net_open(struct net_device *dev)*/
2460 /* LDV_COMMENT_BEGIN_PREP */
2461 #define CHIP_IO_EXTENT 8
2462 #define BROKEN_DONGLE_ID
2463 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2464 /* LDV_COMMENT_END_PREP */
2465 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_net_open" */
2466 struct net_device * var_group2;
2467 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "ali_ircc_net_open" */
2468 static int res_ali_ircc_net_open_19;
2469 /* content: static int ali_ircc_net_close(struct net_device *dev)*/
2470 /* LDV_COMMENT_BEGIN_PREP */
2471 #define CHIP_IO_EXTENT 8
2472 #define BROKEN_DONGLE_ID
2473 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2474 /* LDV_COMMENT_END_PREP */
2475 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "ali_ircc_net_close" */
2476 static int res_ali_ircc_net_close_20;
2477 /* content: static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)*/
2478 /* LDV_COMMENT_BEGIN_PREP */
2479 #define CHIP_IO_EXTENT 8
2480 #define BROKEN_DONGLE_ID
2481 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2482 /* LDV_COMMENT_END_PREP */
2483 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_sir_hard_xmit" */
2484 struct sk_buff * var_group3;
2485 /* content: static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
2486 /* LDV_COMMENT_BEGIN_PREP */
2487 #define CHIP_IO_EXTENT 8
2488 #define BROKEN_DONGLE_ID
2489 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2490 /* LDV_COMMENT_END_PREP */
2491 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_net_ioctl" */
2492 struct ifreq * var_group4;
2493 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_net_ioctl" */
2494 int var_ali_ircc_net_ioctl_27_p2;
2495
2496 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_fir_ops **/
2497 /* content: static int ali_ircc_net_open(struct net_device *dev)*/
2498 /* LDV_COMMENT_BEGIN_PREP */
2499 #define CHIP_IO_EXTENT 8
2500 #define BROKEN_DONGLE_ID
2501 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2502 /* LDV_COMMENT_END_PREP */
2503 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "ali_ircc_net_open" */
2504
2505 /* content: static int ali_ircc_net_close(struct net_device *dev)*/
2506 /* LDV_COMMENT_BEGIN_PREP */
2507 #define CHIP_IO_EXTENT 8
2508 #define BROKEN_DONGLE_ID
2509 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2510 /* LDV_COMMENT_END_PREP */
2511 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "ali_ircc_net_close" */
2512
2513 /* content: static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)*/
2514 /* LDV_COMMENT_BEGIN_PREP */
2515 #define CHIP_IO_EXTENT 8
2516 #define BROKEN_DONGLE_ID
2517 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2518 /* LDV_COMMENT_END_PREP */
2519 /* content: static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
2520 /* LDV_COMMENT_BEGIN_PREP */
2521 #define CHIP_IO_EXTENT 8
2522 #define BROKEN_DONGLE_ID
2523 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2524 /* LDV_COMMENT_END_PREP */
2525
2526 /** CALLBACK SECTION request_irq **/
2527 /* content: static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)*/
2528 /* LDV_COMMENT_BEGIN_PREP */
2529 #define CHIP_IO_EXTENT 8
2530 #define BROKEN_DONGLE_ID
2531 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2532 /* LDV_COMMENT_END_PREP */
2533 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_interrupt" */
2534 int var_ali_ircc_interrupt_9_p0;
2535 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "ali_ircc_interrupt" */
2536 void * var_ali_ircc_interrupt_9_p1;
2537
2538
2539
2540
2541 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
2542 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
2543 /*============================= VARIABLE INITIALIZING PART =============================*/
2544 LDV_IN_INTERRUPT=1;
2545
2546
2547
2548
2549 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
2550 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
2551 /*============================= FUNCTION CALL SECTION =============================*/
2552 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
2553 ldv_initialize();
2554
2555 /** INIT: init_type: ST_MODULE_INIT **/
2556 /* content: static int __init ali_ircc_init(void)*/
2557 /* LDV_COMMENT_BEGIN_PREP */
2558 #define CHIP_IO_EXTENT 8
2559 #define BROKEN_DONGLE_ID
2560 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2561 /* LDV_COMMENT_END_PREP */
2562 /* LDV_COMMENT_FUNCTION_CALL Kernel calls the driver init function after the driver is loaded into the kernel. This function is declared with module_init(function name). */
2563 ldv_handler_precall();
2564 if(ali_ircc_init())
2565 goto ldv_final;
2566 int ldv_s_ali_ircc_driver_platform_driver = 0;
2567
2568 int ldv_s_ali_ircc_sir_ops_net_device_ops = 0;
2569
2570
2571 int ldv_s_ali_ircc_fir_ops_net_device_ops = 0;
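/* NOTE (annotation, not in the original source): these ldv_s_* counters force
 * the nondeterministic loop below to call the callbacks in a legal order,
 * e.g. ndo_open before ndo_stop and suspend before resume; the loop only
 * terminates once every counter is back at zero. */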
2572
2573
2574
2575
2576
2577 while( nondet_int()
2578 || !(ldv_s_ali_ircc_driver_platform_driver == 0)
2579 || !(ldv_s_ali_ircc_sir_ops_net_device_ops == 0)
2580 || !(ldv_s_ali_ircc_fir_ops_net_device_ops == 0)
2581 ) {
2582
2583 switch(nondet_int()) {
2584
2585 case 0: {
2586
2587 /** STRUCT: struct type: platform_driver, struct name: ali_ircc_driver **/
2588 if(ldv_s_ali_ircc_driver_platform_driver==0) {
2589
2590 /* content: static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)*/
2591 /* LDV_COMMENT_BEGIN_PREP */
2592 #define CHIP_IO_EXTENT 8
2593 #define BROKEN_DONGLE_ID
2594 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2595 /* LDV_COMMENT_END_PREP */
2596 /* LDV_COMMENT_FUNCTION_CALL Function from field "suspend" from driver structure with callbacks "ali_ircc_driver" */
2597 ldv_handler_precall();
2598 ali_ircc_suspend( var_group1, var_ali_ircc_suspend_29_p1);
2599 ldv_s_ali_ircc_driver_platform_driver++;
2600
2601 }
2602
2603 }
2604
2605 break;
2606 case 1: {
2607
2608 /** STRUCT: struct type: platform_driver, struct name: ali_ircc_driver **/
2609 if(ldv_s_ali_ircc_driver_platform_driver==1) {
2610
2611 /* content: static int ali_ircc_resume(struct platform_device *dev)*/
2612 /* LDV_COMMENT_BEGIN_PREP */
2613 #define CHIP_IO_EXTENT 8
2614 #define BROKEN_DONGLE_ID
2615 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2616 /* LDV_COMMENT_END_PREP */
2617 /* LDV_COMMENT_FUNCTION_CALL Function from field "resume" from driver structure with callbacks "ali_ircc_driver" */
2618 ldv_handler_precall();
2619 ali_ircc_resume( var_group1);
2620 ldv_s_ali_ircc_driver_platform_driver=0;
2621
2622 }
2623
2624 }
2625
2626 break;
2627 case 2: {
2628
2629 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_sir_ops **/
2630 if(ldv_s_ali_ircc_sir_ops_net_device_ops==0) {
2631
2632 /* content: static int ali_ircc_net_open(struct net_device *dev)*/
2633 /* LDV_COMMENT_BEGIN_PREP */
2634 #define CHIP_IO_EXTENT 8
2635 #define BROKEN_DONGLE_ID
2636 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2637 /* LDV_COMMENT_END_PREP */
2638 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "ali_ircc_sir_ops". Standard function test for correct return result. */
2639 ldv_handler_precall();
2640 res_ali_ircc_net_open_19 = ali_ircc_net_open( var_group2);
2641 ldv_check_return_value(res_ali_ircc_net_open_19);
2642 if(res_ali_ircc_net_open_19 < 0)
2643 goto ldv_module_exit;
2644 ldv_s_ali_ircc_sir_ops_net_device_ops++;
2645
2646 }
2647
2648 }
2649
2650 break;
2651 case 3: {
2652
2653 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_sir_ops **/
2654 if(ldv_s_ali_ircc_sir_ops_net_device_ops==1) {
2655
2656 /* content: static int ali_ircc_net_close(struct net_device *dev)*/
2657 /* LDV_COMMENT_BEGIN_PREP */
2658 #define CHIP_IO_EXTENT 8
2659 #define BROKEN_DONGLE_ID
2660 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2661 /* LDV_COMMENT_END_PREP */
2662 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "ali_ircc_sir_ops". Standard function test for correct return result. */
2663 ldv_handler_precall();
2664 res_ali_ircc_net_close_20 = ali_ircc_net_close( var_group2);
2665 ldv_check_return_value(res_ali_ircc_net_close_20);
2666 if(res_ali_ircc_net_close_20)
2667 goto ldv_module_exit;
2668 ldv_s_ali_ircc_sir_ops_net_device_ops=0;
2669
2670 }
2671
2672 }
2673
2674 break;
2675 case 4: {
2676
2677 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_sir_ops **/
2678
2679
2680 /* content: static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)*/
2681 /* LDV_COMMENT_BEGIN_PREP */
2682 #define CHIP_IO_EXTENT 8
2683 #define BROKEN_DONGLE_ID
2684 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2685 /* LDV_COMMENT_END_PREP */
2686 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "ali_ircc_sir_ops" */
2687 ldv_handler_precall();
2688 ali_ircc_sir_hard_xmit( var_group3, var_group2);
2689
2690
2691
2692
2693 }
2694
2695 break;
2696 case 5: {
2697
2698 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_sir_ops **/
2699
2700
2701 /* content: static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
2702 /* LDV_COMMENT_BEGIN_PREP */
2703 #define CHIP_IO_EXTENT 8
2704 #define BROKEN_DONGLE_ID
2705 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2706 /* LDV_COMMENT_END_PREP */
2707 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_do_ioctl" from driver structure with callbacks "ali_ircc_sir_ops" */
2708 ldv_handler_precall();
2709 ali_ircc_net_ioctl( var_group2, var_group4, var_ali_ircc_net_ioctl_27_p2);
2710
2711
2712
2713
2714 }
2715
2716 break;
2717 case 6: {
2718
2719 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_fir_ops **/
2720 if(ldv_s_ali_ircc_fir_ops_net_device_ops==0) {
2721
2722 /* content: static int ali_ircc_net_open(struct net_device *dev)*/
2723 /* LDV_COMMENT_BEGIN_PREP */
2724 #define CHIP_IO_EXTENT 8
2725 #define BROKEN_DONGLE_ID
2726 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2727 /* LDV_COMMENT_END_PREP */
2728 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_open" from driver structure with callbacks "ali_ircc_fir_ops". Standard function test for correct return result. */
2729 ldv_handler_precall();
2730 res_ali_ircc_net_open_19 = ali_ircc_net_open( var_group2);
2731 ldv_check_return_value(res_ali_ircc_net_open_19);
2732 if(res_ali_ircc_net_open_19 < 0)
2733 goto ldv_module_exit;
2734 ldv_s_ali_ircc_fir_ops_net_device_ops++;
2735
2736 }
2737
2738 }
2739
2740 break;
2741 case 7: {
2742
2743 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_fir_ops **/
2744 if(ldv_s_ali_ircc_fir_ops_net_device_ops==1) {
2745
2746 /* content: static int ali_ircc_net_close(struct net_device *dev)*/
2747 /* LDV_COMMENT_BEGIN_PREP */
2748 #define CHIP_IO_EXTENT 8
2749 #define BROKEN_DONGLE_ID
2750 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2751 /* LDV_COMMENT_END_PREP */
2752 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_stop" from driver structure with callbacks "ali_ircc_fir_ops". Standard function test for correct return result. */
2753 ldv_handler_precall();
2754 res_ali_ircc_net_close_20 = ali_ircc_net_close( var_group2);
2755 ldv_check_return_value(res_ali_ircc_net_close_20);
2756 if(res_ali_ircc_net_close_20)
2757 goto ldv_module_exit;
2758 ldv_s_ali_ircc_fir_ops_net_device_ops=0;
2759
2760 }
2761
2762 }
2763
2764 break;
2765 case 8: {
2766
2767 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_fir_ops **/
2768
2769
2770 /* content: static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)*/
2771 /* LDV_COMMENT_BEGIN_PREP */
2772 #define CHIP_IO_EXTENT 8
2773 #define BROKEN_DONGLE_ID
2774 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2775 /* LDV_COMMENT_END_PREP */
2776 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_start_xmit" from driver structure with callbacks "ali_ircc_fir_ops" */
2777 ldv_handler_precall();
2778 ali_ircc_fir_hard_xmit( var_group3, var_group2);
2779
2780
2781
2782
2783 }
2784
2785 break;
2786 case 9: {
2787
2788 /** STRUCT: struct type: net_device_ops, struct name: ali_ircc_fir_ops **/
2789
2790
2791 /* content: static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)*/
2792 /* LDV_COMMENT_BEGIN_PREP */
2793 #define CHIP_IO_EXTENT 8
2794 #define BROKEN_DONGLE_ID
2795 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2796 /* LDV_COMMENT_END_PREP */
2797 /* LDV_COMMENT_FUNCTION_CALL Function from field "ndo_do_ioctl" from driver structure with callbacks "ali_ircc_fir_ops" */
2798 ldv_handler_precall();
2799 ali_ircc_net_ioctl( var_group2, var_group4, var_ali_ircc_net_ioctl_27_p2);
2800
2801
2802
2803
2804 }
2805
2806 break;
2807 case 10: {
2808
2809 /** CALLBACK SECTION request_irq **/
2810 LDV_IN_INTERRUPT=2;
2811
2812 /* content: static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)*/
2813 /* LDV_COMMENT_BEGIN_PREP */
2814 #define CHIP_IO_EXTENT 8
2815 #define BROKEN_DONGLE_ID
2816 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2817 /* LDV_COMMENT_END_PREP */
2818 /* LDV_COMMENT_FUNCTION_CALL */
2819 ldv_handler_precall();
2820 ali_ircc_interrupt( var_ali_ircc_interrupt_9_p0, var_ali_ircc_interrupt_9_p1);
2821 LDV_IN_INTERRUPT=1;
2822
2823
2824
2825 }
2826
2827 break;
2828 default: break;
2829
2830 }
2831
2832 }
2833
2834 ldv_module_exit:
2835
2836 /** INIT: init_type: ST_MODULE_EXIT **/
2837 /* content: static void __exit ali_ircc_cleanup(void)*/
2838 /* LDV_COMMENT_BEGIN_PREP */
2839 #define CHIP_IO_EXTENT 8
2840 #define BROKEN_DONGLE_ID
2841 #define ALI_IRCC_DRIVER_NAME "ali-ircc"
2842 /* LDV_COMMENT_END_PREP */
2843 /* LDV_COMMENT_FUNCTION_CALL Kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared with module_exit(function name). */
2844 ldv_handler_precall();
2845 ali_ircc_cleanup();
2846
2847 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
2848 ldv_final: ldv_check_final_state();
2849
2850 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
2851 return;
2852
2853 }
2854 #endif
2855
2856 /* LDV_COMMENT_END_MAIN */ 1
2
3 #include <linux/kernel.h>
4 #include <linux/spinlock.h>
5
6 #include <verifier/rcv.h>
7
8 static int ldv_spin_NOT_ARG_SIGN;
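/* NOTE (annotation, not in the original source): in this model the value 1
 * means the spin lock is free and 2 means it is held; the variable is assumed
 * to be set to 1 by the generated initialization code, which is not shown here. */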
9
10 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_NOT_ARG_SIGN') Check that spin 'NOT_ARG_SIGN' was not locked and lock it */
11 void ldv_spin_lock_NOT_ARG_SIGN(void)
12 {
13 /* LDV_COMMENT_ASSERT Spin 'NOT_ARG_SIGN' must be unlocked */
14 ldv_assert(ldv_spin_NOT_ARG_SIGN == 1);
15 /* LDV_COMMENT_CHANGE_STATE Lock spin 'NOT_ARG_SIGN' */
16 ldv_spin_NOT_ARG_SIGN = 2;
17 }
18
19 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_NOT_ARG_SIGN') Check that spin 'NOT_ARG_SIGN' was locked and unlock it */
20 void ldv_spin_unlock_NOT_ARG_SIGN(void)
21 {
22 /* LDV_COMMENT_ASSERT Spin 'NOT_ARG_SIGN' must be locked */
23 ldv_assert(ldv_spin_NOT_ARG_SIGN == 2);
24 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'NOT_ARG_SIGN' */
25 ldv_spin_NOT_ARG_SIGN = 1;
26 }
27
28 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_NOT_ARG_SIGN') Check that spin 'NOT_ARG_SIGN' was not locked and nondeterministically lock it. Return 0 on fails */
29 int ldv_spin_trylock_NOT_ARG_SIGN(void)
30 {
31 int is_spin_held_by_another_thread;
32
33 /* LDV_COMMENT_ASSERT It may be an error if spin 'NOT_ARG_SIGN' is locked at this point */
34 ldv_assert(ldv_spin_NOT_ARG_SIGN == 1);
35
36 /* LDV_COMMENT_OTHER Construct nondetermined result */
37 is_spin_held_by_another_thread = ldv_undef_int();
38
39 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'NOT_ARG_SIGN' */
40 if (is_spin_held_by_another_thread)
41 {
42 /* LDV_COMMENT_RETURN Spin 'NOT_ARG_SIGN' was not locked. Finish with fail */
43 return 0;
44 }
45 else
46 {
47 /* LDV_COMMENT_CHANGE_STATE Lock spin 'NOT_ARG_SIGN' */
48 ldv_spin_NOT_ARG_SIGN = 2;
49 /* LDV_COMMENT_RETURN Finish with success */
50 return 1;
51 }
52 }
53
54 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_NOT_ARG_SIGN') The same process can not both lock spin 'NOT_ARG_SIGN' and wait until it will be unlocked */
55 void ldv_spin_unlock_wait_NOT_ARG_SIGN(void)
56 {
57 /* LDV_COMMENT_ASSERT Spin 'NOT_ARG_SIGN' must not be locked by a current process */
58 ldv_assert(ldv_spin_NOT_ARG_SIGN == 1);
59 }
60
61 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_NOT_ARG_SIGN') Check whether spin 'NOT_ARG_SIGN' was locked */
62 int ldv_spin_is_locked_NOT_ARG_SIGN(void)
63 {
64 int is_spin_held_by_another_thread;
65
66 /* LDV_COMMENT_OTHER Construct nondetermined result */
67 is_spin_held_by_another_thread = ldv_undef_int();
68
69 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'NOT_ARG_SIGN' was locked */
70 if(ldv_spin_NOT_ARG_SIGN == 1 && !is_spin_held_by_another_thread)
71 {
72 /* LDV_COMMENT_RETURN Spin 'NOT_ARG_SIGN' was unlocked */
73 return 0;
74 }
75 else
76 {
77 /* LDV_COMMENT_RETURN Spin 'NOT_ARG_SIGN' was locked */
78 return 1;
79 }
80 }
81
82 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_NOT_ARG_SIGN') Check whether spin 'NOT_ARG_SIGN' was locked */
83 int ldv_spin_can_lock_NOT_ARG_SIGN(void)
84 {
85 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
86 return !ldv_spin_is_locked_NOT_ARG_SIGN();
87 }
88
89 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_NOT_ARG_SIGN') Check whether spin 'NOT_ARG_SIGN' is contended */
90 int ldv_spin_is_contended_NOT_ARG_SIGN(void)
91 {
92 int is_spin_contended;
93
94 /* LDV_COMMENT_OTHER Construct nondetermined result */
95 is_spin_contended = ldv_undef_int();
96
97 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'NOT_ARG_SIGN' is contended */
98 if(is_spin_contended)
99 {
100 /* LDV_COMMENT_RETURN Spin 'NOT_ARG_SIGN' is contended */
101 return 0;
102 }
103 else
104 {
105 /* LDV_COMMENT_RETURN Spin 'NOT_ARG_SIGN' isn't contended */
106 return 1;
107 }
108 }
109
110 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_NOT_ARG_SIGN') Lock spin 'NOT_ARG_SIGN' if atomic decrement result is zero */
111 int ldv_atomic_dec_and_lock_NOT_ARG_SIGN(void)
112 {
113 int atomic_value_after_dec;
114
115 /* LDV_COMMENT_ASSERT Spin 'NOT_ARG_SIGN' must be unlocked (since we may lock it in this function) */
116 ldv_assert(ldv_spin_NOT_ARG_SIGN == 1);
117
118 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
119 atomic_value_after_dec = ldv_undef_int();
120
121 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
122 if (atomic_value_after_dec == 0)
123 {
124 /* LDV_COMMENT_CHANGE_STATE Lock spin 'NOT_ARG_SIGN', as atomic has decremented to zero */
125 ldv_spin_NOT_ARG_SIGN = 2;
126 /* LDV_COMMENT_RETURN Return 1 with locked spin 'NOT_ARG_SIGN' */
127 return 1;
128 }
129
130 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'NOT_ARG_SIGN' */
131 return 0;
132 }
133 static int ldv_spin__xmit_lock_of_netdev_queue;
134
135 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock__xmit_lock_of_netdev_queue') Check that spin '_xmit_lock_of_netdev_queue' was not locked and lock it */
136 void ldv_spin_lock__xmit_lock_of_netdev_queue(void)
137 {
138 /* LDV_COMMENT_ASSERT Spin '_xmit_lock_of_netdev_queue' must be unlocked */
139 ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 1);
140 /* LDV_COMMENT_CHANGE_STATE Lock spin '_xmit_lock_of_netdev_queue' */
141 ldv_spin__xmit_lock_of_netdev_queue = 2;
142 }
143
144 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock__xmit_lock_of_netdev_queue') Check that spin '_xmit_lock_of_netdev_queue' was locked and unlock it */
145 void ldv_spin_unlock__xmit_lock_of_netdev_queue(void)
146 {
147 /* LDV_COMMENT_ASSERT Spin '_xmit_lock_of_netdev_queue' must be locked */
148 ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 2);
149 /* LDV_COMMENT_CHANGE_STATE Unlock spin '_xmit_lock_of_netdev_queue' */
150 ldv_spin__xmit_lock_of_netdev_queue = 1;
151 }
152
153 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock__xmit_lock_of_netdev_queue') Check that spin '_xmit_lock_of_netdev_queue' was not locked and nondeterministically lock it. Return 0 on fails */
154 int ldv_spin_trylock__xmit_lock_of_netdev_queue(void)
155 {
156 int is_spin_held_by_another_thread;
157
158 /* LDV_COMMENT_ASSERT It may be an error if spin '_xmit_lock_of_netdev_queue' is locked at this point */
159 ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 1);
160
161 /* LDV_COMMENT_OTHER Construct nondetermined result */
162 is_spin_held_by_another_thread = ldv_undef_int();
163
164 /* LDV_COMMENT_ASSERT Nondeterministically lock spin '_xmit_lock_of_netdev_queue' */
165 if (is_spin_held_by_another_thread)
166 {
167 /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' was not locked. Finish with fail */
168 return 0;
169 }
170 else
171 {
172 /* LDV_COMMENT_CHANGE_STATE Lock spin '_xmit_lock_of_netdev_queue' */
173 ldv_spin__xmit_lock_of_netdev_queue = 2;
174 /* LDV_COMMENT_RETURN Finish with success */
175 return 1;
176 }
177 }
178
179 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait__xmit_lock_of_netdev_queue') The same process can not both lock spin '_xmit_lock_of_netdev_queue' and wait until it will be unlocked */
180 void ldv_spin_unlock_wait__xmit_lock_of_netdev_queue(void)
181 {
182 /* LDV_COMMENT_ASSERT Spin '_xmit_lock_of_netdev_queue' must not be locked by a current process */
183 ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 1);
184 }
185
186 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked__xmit_lock_of_netdev_queue') Check whether spin '_xmit_lock_of_netdev_queue' was locked */
187 int ldv_spin_is_locked__xmit_lock_of_netdev_queue(void)
188 {
189 int is_spin_held_by_another_thread;
190
191 /* LDV_COMMENT_OTHER Construct nondetermined result */
192 is_spin_held_by_another_thread = ldv_undef_int();
193
194 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin '_xmit_lock_of_netdev_queue' was locked */
195 if(ldv_spin__xmit_lock_of_netdev_queue == 1 && !is_spin_held_by_another_thread)
196 {
197 /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' was unlocked */
198 return 0;
199 }
200 else
201 {
202 /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' was locked */
203 return 1;
204 }
205 }
206
207 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock__xmit_lock_of_netdev_queue') Check whether spin '_xmit_lock_of_netdev_queue' was locked */
208 int ldv_spin_can_lock__xmit_lock_of_netdev_queue(void)
209 {
210 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
211 return !ldv_spin_is_locked__xmit_lock_of_netdev_queue();
212 }
213
214 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended__xmit_lock_of_netdev_queue') Check whether spin '_xmit_lock_of_netdev_queue' is contended */
215 int ldv_spin_is_contended__xmit_lock_of_netdev_queue(void)
216 {
217 int is_spin_contended;
218
219 /* LDV_COMMENT_OTHER Construct nondetermined result */
220 is_spin_contended = ldv_undef_int();
221
222 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin '_xmit_lock_of_netdev_queue' is contended */
223 if (is_spin_contended)
224 {
225 /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' is contended */
226 return 1;
227 }
228 else
229 {
230 /* LDV_COMMENT_RETURN Spin '_xmit_lock_of_netdev_queue' isn't contended */
231 return 0;
232 }
233 }
234
235 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue') Lock spin '_xmit_lock_of_netdev_queue' if atomic decrement result is zero */
236 int ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue(void)
237 {
238 int atomic_value_after_dec;
239
240 /* LDV_COMMENT_ASSERT Spin '_xmit_lock_of_netdev_queue' must be unlocked (since we may lock it in this function) */
241 ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 1);
242
243 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
244 atomic_value_after_dec = ldv_undef_int();
245
246 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
247 if (atomic_value_after_dec == 0)
248 {
249 /* LDV_COMMENT_CHANGE_STATE Lock spin '_xmit_lock_of_netdev_queue', as atomic has decremented to zero */
250 ldv_spin__xmit_lock_of_netdev_queue = 2;
251 /* LDV_COMMENT_RETURN Return 1 with locked spin '_xmit_lock_of_netdev_queue' */
252 return 1;
253 }
254
255 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin '_xmit_lock_of_netdev_queue' */
256 return 0;
257 }
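/* Illustrative sketch: atomic_dec_and_lock(&refcount, &lock) is modelled above with a
 * nondeterministic decrement result, so only a return value of 1 hands the caller the
 * spin, and only then must it be released: */
void ldv_example_dec_and_lock__xmit_lock(void)
{
        if (ldv_atomic_dec_and_lock__xmit_lock_of_netdev_queue()) {
                /* the reference count reached zero: tear the object down, then unlock */
                ldv_spin_unlock__xmit_lock_of_netdev_queue();
        }
}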
258 static int ldv_spin_addr_list_lock_of_net_device;
259
260 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_addr_list_lock_of_net_device') Check that spin 'addr_list_lock_of_net_device' was not locked and lock it */
261 void ldv_spin_lock_addr_list_lock_of_net_device(void)
262 {
263 /* LDV_COMMENT_ASSERT Spin 'addr_list_lock_of_net_device' must be unlocked */
264 ldv_assert(ldv_spin_addr_list_lock_of_net_device == 1);
265 /* LDV_COMMENT_CHANGE_STATE Lock spin 'addr_list_lock_of_net_device' */
266 ldv_spin_addr_list_lock_of_net_device = 2;
267 }
268
269 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_addr_list_lock_of_net_device') Check that spin 'addr_list_lock_of_net_device' was locked and unlock it */
270 void ldv_spin_unlock_addr_list_lock_of_net_device(void)
271 {
272 /* LDV_COMMENT_ASSERT Spin 'addr_list_lock_of_net_device' must be locked */
273 ldv_assert(ldv_spin_addr_list_lock_of_net_device == 2);
274 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'addr_list_lock_of_net_device' */
275 ldv_spin_addr_list_lock_of_net_device = 1;
276 }
277
278 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_addr_list_lock_of_net_device') Check that spin 'addr_list_lock_of_net_device' was not locked and nondeterministically lock it. Return 0 on failure */
279 int ldv_spin_trylock_addr_list_lock_of_net_device(void)
280 {
281 int is_spin_held_by_another_thread;
282
283 /* LDV_COMMENT_ASSERT It may be an error if spin 'addr_list_lock_of_net_device' is locked at this point */
284 ldv_assert(ldv_spin_addr_list_lock_of_net_device == 1);
285
286 /* LDV_COMMENT_OTHER Construct nondetermined result */
287 is_spin_held_by_another_thread = ldv_undef_int();
288
289 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'addr_list_lock_of_net_device' */
290 if (is_spin_held_by_another_thread)
291 {
292 /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' could not be locked (it is held by another thread). Finish with failure */
293 return 0;
294 }
295 else
296 {
297 /* LDV_COMMENT_CHANGE_STATE Lock spin 'addr_list_lock_of_net_device' */
298 ldv_spin_addr_list_lock_of_net_device = 2;
299 /* LDV_COMMENT_RETURN Finish with success */
300 return 1;
301 }
302 }
303
304 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_addr_list_lock_of_net_device') The same process cannot both lock spin 'addr_list_lock_of_net_device' and wait until it is unlocked */
305 void ldv_spin_unlock_wait_addr_list_lock_of_net_device(void)
306 {
307 /* LDV_COMMENT_ASSERT Spin 'addr_list_lock_of_net_device' must not be locked by the current process */
308 ldv_assert(ldv_spin_addr_list_lock_of_net_device == 1);
309 }
310
311 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_addr_list_lock_of_net_device') Check whether spin 'addr_list_lock_of_net_device' was locked */
312 int ldv_spin_is_locked_addr_list_lock_of_net_device(void)
313 {
314 int is_spin_held_by_another_thread;
315
316 /* LDV_COMMENT_OTHER Construct nondetermined result */
317 is_spin_held_by_another_thread = ldv_undef_int();
318
319 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'addr_list_lock_of_net_device' is locked */
320 if(ldv_spin_addr_list_lock_of_net_device == 1 && !is_spin_held_by_another_thread)
321 {
322 /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' was unlocked */
323 return 0;
324 }
325 else
326 {
327 /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' was locked */
328 return 1;
329 }
330 }
331
332 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_addr_list_lock_of_net_device') Check whether spin 'addr_list_lock_of_net_device' can be locked */
333 int ldv_spin_can_lock_addr_list_lock_of_net_device(void)
334 {
335 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
336 return !ldv_spin_is_locked_addr_list_lock_of_net_device();
337 }
338
339 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_addr_list_lock_of_net_device') Check whether spin 'addr_list_lock_of_net_device' is contended */
340 int ldv_spin_is_contended_addr_list_lock_of_net_device(void)
341 {
342 int is_spin_contended;
343
344 /* LDV_COMMENT_OTHER Construct nondetermined result */
345 is_spin_contended = ldv_undef_int();
346
347 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'addr_list_lock_of_net_device' is contended */
348 if (is_spin_contended)
349 {
350 /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' is contended */
351 return 1;
352 }
353 else
354 {
355 /* LDV_COMMENT_RETURN Spin 'addr_list_lock_of_net_device' isn't contended */
356 return 0;
357 }
358 }
359
360 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_addr_list_lock_of_net_device') Lock spin 'addr_list_lock_of_net_device' if atomic decrement result is zero */
361 int ldv_atomic_dec_and_lock_addr_list_lock_of_net_device(void)
362 {
363 int atomic_value_after_dec;
364
365 /* LDV_COMMENT_ASSERT Spin 'addr_list_lock_of_net_device' must be unlocked (since we may lock it in this function) */
366 ldv_assert(ldv_spin_addr_list_lock_of_net_device == 1);
367
368 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
369 atomic_value_after_dec = ldv_undef_int();
370
371 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
372 if (atomic_value_after_dec == 0)
373 {
374 /* LDV_COMMENT_CHANGE_STATE Lock spin 'addr_list_lock_of_net_device', as atomic has decremented to zero */
375 ldv_spin_addr_list_lock_of_net_device = 2;
376 /* LDV_COMMENT_RETURN Return 1 with locked spin 'addr_list_lock_of_net_device' */
377 return 1;
378 }
379
380 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'addr_list_lock_of_net_device' */
381 return 0;
382 }
383 static int ldv_spin_alloc_lock_of_task_struct;
384
385 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_alloc_lock_of_task_struct') Check that spin 'alloc_lock_of_task_struct' was not locked and lock it */
386 void ldv_spin_lock_alloc_lock_of_task_struct(void)
387 {
388 /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be unlocked */
389 ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);
390 /* LDV_COMMENT_CHANGE_STATE Lock spin 'alloc_lock_of_task_struct' */
391 ldv_spin_alloc_lock_of_task_struct = 2;
392 }
393
394 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_alloc_lock_of_task_struct') Check that spin 'alloc_lock_of_task_struct' was locked and unlock it */
395 void ldv_spin_unlock_alloc_lock_of_task_struct(void)
396 {
397 /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be locked */
398 ldv_assert(ldv_spin_alloc_lock_of_task_struct == 2);
399 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'alloc_lock_of_task_struct' */
400 ldv_spin_alloc_lock_of_task_struct = 1;
401 }
402
403 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_alloc_lock_of_task_struct') Check that spin 'alloc_lock_of_task_struct' was not locked and nondeterministically lock it. Return 0 on failure */
404 int ldv_spin_trylock_alloc_lock_of_task_struct(void)
405 {
406 int is_spin_held_by_another_thread;
407
408 /* LDV_COMMENT_ASSERT It may be an error if spin 'alloc_lock_of_task_struct' is locked at this point */
409 ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);
410
411 /* LDV_COMMENT_OTHER Construct nondetermined result */
412 is_spin_held_by_another_thread = ldv_undef_int();
413
414 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'alloc_lock_of_task_struct' */
415 if (is_spin_held_by_another_thread)
416 {
417 /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' could not be locked (it is held by another thread). Finish with failure */
418 return 0;
419 }
420 else
421 {
422 /* LDV_COMMENT_CHANGE_STATE Lock spin 'alloc_lock_of_task_struct' */
423 ldv_spin_alloc_lock_of_task_struct = 2;
424 /* LDV_COMMENT_RETURN Finish with success */
425 return 1;
426 }
427 }
428
429 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_alloc_lock_of_task_struct') The same process cannot both lock spin 'alloc_lock_of_task_struct' and wait until it is unlocked */
430 void ldv_spin_unlock_wait_alloc_lock_of_task_struct(void)
431 {
432 /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must not be locked by the current process */
433 ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);
434 }
435
436 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_alloc_lock_of_task_struct') Check whether spin 'alloc_lock_of_task_struct' was locked */
437 int ldv_spin_is_locked_alloc_lock_of_task_struct(void)
438 {
439 int is_spin_held_by_another_thread;
440
441 /* LDV_COMMENT_OTHER Construct nondetermined result */
442 is_spin_held_by_another_thread = ldv_undef_int();
443
444 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'alloc_lock_of_task_struct' is locked */
445 if(ldv_spin_alloc_lock_of_task_struct == 1 && !is_spin_held_by_another_thread)
446 {
447 /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' was unlocked */
448 return 0;
449 }
450 else
451 {
452 /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' was locked */
453 return 1;
454 }
455 }
456
457 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_alloc_lock_of_task_struct') Check whether spin 'alloc_lock_of_task_struct' can be locked */
458 int ldv_spin_can_lock_alloc_lock_of_task_struct(void)
459 {
460 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
461 return !ldv_spin_is_locked_alloc_lock_of_task_struct();
462 }
463
464 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_alloc_lock_of_task_struct') Check whether spin 'alloc_lock_of_task_struct' is contended */
465 int ldv_spin_is_contended_alloc_lock_of_task_struct(void)
466 {
467 int is_spin_contended;
468
469 /* LDV_COMMENT_OTHER Construct nondetermined result */
470 is_spin_contended = ldv_undef_int();
471
472 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'alloc_lock_of_task_struct' is contended */
473 if (is_spin_contended)
474 {
475 /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' is contended */
476 return 1;
477 }
478 else
479 {
480 /* LDV_COMMENT_RETURN Spin 'alloc_lock_of_task_struct' isn't contended */
481 return 0;
482 }
483 }
484
485 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_alloc_lock_of_task_struct') Lock spin 'alloc_lock_of_task_struct' if atomic decrement result is zero */
486 int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct(void)
487 {
488 int atomic_value_after_dec;
489
490 /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be unlocked (since we may lock it in this function) */
491 ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);
492
493 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
494 atomic_value_after_dec = ldv_undef_int();
495
496 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
497 if (atomic_value_after_dec == 0)
498 {
499 /* LDV_COMMENT_CHANGE_STATE Lock spin 'alloc_lock_of_task_struct', as atomic has decremented to zero */
500 ldv_spin_alloc_lock_of_task_struct = 2;
501 /* LDV_COMMENT_RETURN Return 1 with locked spin 'alloc_lock_of_task_struct' */
502 return 1;
503 }
504
505 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'alloc_lock_of_task_struct' */
506 return 0;
507 }
508 static int ldv_spin_dma_spin_lock;
509
510 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_dma_spin_lock') Check that spin 'dma_spin_lock' was not locked and lock it */
511 void ldv_spin_lock_dma_spin_lock(void)
512 {
513 /* LDV_COMMENT_ASSERT Spin 'dma_spin_lock' must be unlocked */
514 ldv_assert(ldv_spin_dma_spin_lock == 1);
515 /* LDV_COMMENT_CHANGE_STATE Lock spin 'dma_spin_lock' */
516 ldv_spin_dma_spin_lock = 2;
517 }
518
519 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_dma_spin_lock') Check that spin 'dma_spin_lock' was locked and unlock it */
520 void ldv_spin_unlock_dma_spin_lock(void)
521 {
522 /* LDV_COMMENT_ASSERT Spin 'dma_spin_lock' must be locked */
523 ldv_assert(ldv_spin_dma_spin_lock == 2);
524 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'dma_spin_lock' */
525 ldv_spin_dma_spin_lock = 1;
526 }
527
528 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_dma_spin_lock') Check that spin 'dma_spin_lock' was not locked and nondeterministically lock it. Return 0 on failure */
529 int ldv_spin_trylock_dma_spin_lock(void)
530 {
531 int is_spin_held_by_another_thread;
532
533 /* LDV_COMMENT_ASSERT It may be an error if spin 'dma_spin_lock' is locked at this point */
534 ldv_assert(ldv_spin_dma_spin_lock == 1);
535
536 /* LDV_COMMENT_OTHER Construct nondetermined result */
537 is_spin_held_by_another_thread = ldv_undef_int();
538
539 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'dma_spin_lock' */
540 if (is_spin_held_by_another_thread)
541 {
542 /* LDV_COMMENT_RETURN Spin 'dma_spin_lock' could not be locked (it is held by another thread). Finish with failure */
543 return 0;
544 }
545 else
546 {
547 /* LDV_COMMENT_CHANGE_STATE Lock spin 'dma_spin_lock' */
548 ldv_spin_dma_spin_lock = 2;
549 /* LDV_COMMENT_RETURN Finish with success */
550 return 1;
551 }
552 }
553
554 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_dma_spin_lock') The same process cannot both lock spin 'dma_spin_lock' and wait until it is unlocked */
555 void ldv_spin_unlock_wait_dma_spin_lock(void)
556 {
557 /* LDV_COMMENT_ASSERT Spin 'dma_spin_lock' must not be locked by the current process */
558 ldv_assert(ldv_spin_dma_spin_lock == 1);
559 }
560
561 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_dma_spin_lock') Check whether spin 'dma_spin_lock' was locked */
562 int ldv_spin_is_locked_dma_spin_lock(void)
563 {
564 int is_spin_held_by_another_thread;
565
566 /* LDV_COMMENT_OTHER Construct nondetermined result */
567 is_spin_held_by_another_thread = ldv_undef_int();
568
569 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'dma_spin_lock' is locked */
570 if(ldv_spin_dma_spin_lock == 1 && !is_spin_held_by_another_thread)
571 {
572 /* LDV_COMMENT_RETURN Spin 'dma_spin_lock' was unlocked */
573 return 0;
574 }
575 else
576 {
577 /* LDV_COMMENT_RETURN Spin 'dma_spin_lock' was locked */
578 return 1;
579 }
580 }
581
582 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_dma_spin_lock') Check whether spin 'dma_spin_lock' can be locked */
583 int ldv_spin_can_lock_dma_spin_lock(void)
584 {
585 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
586 return !ldv_spin_is_locked_dma_spin_lock();
587 }
588
589 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_dma_spin_lock') Check whether spin 'dma_spin_lock' is contended */
590 int ldv_spin_is_contended_dma_spin_lock(void)
591 {
592 int is_spin_contended;
593
594 /* LDV_COMMENT_OTHER Construct nondetermined result */
595 is_spin_contended = ldv_undef_int();
596
597 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'dma_spin_lock' is contended */
598 if (is_spin_contended)
599 {
600 /* LDV_COMMENT_RETURN Spin 'dma_spin_lock' is contended */
601 return 1;
602 }
603 else
604 {
605 /* LDV_COMMENT_RETURN Spin 'dma_spin_lock' isn't contended */
606 return 0;
607 }
608 }
609
610 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_dma_spin_lock') Lock spin 'dma_spin_lock' if atomic decrement result is zero */
611 int ldv_atomic_dec_and_lock_dma_spin_lock(void)
612 {
613 int atomic_value_after_dec;
614
615 /* LDV_COMMENT_ASSERT Spin 'dma_spin_lock' must be unlocked (since we may lock it in this function) */
616 ldv_assert(ldv_spin_dma_spin_lock == 1);
617
618 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
619 atomic_value_after_dec = ldv_undef_int();
620
621 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
622 if (atomic_value_after_dec == 0)
623 {
624 /* LDV_COMMENT_CHANGE_STATE Lock spin 'dma_spin_lock', as atomic has decremented to zero */
625 ldv_spin_dma_spin_lock = 2;
626 /* LDV_COMMENT_RETURN Return 1 with locked spin 'dma_spin_lock' */
627 return 1;
628 }
629
630 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'dma_spin_lock' */
631 return 0;
632 }
633 static int ldv_spin_i_lock_of_inode;
634
635 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_i_lock_of_inode') Check that spin 'i_lock_of_inode' was not locked and lock it */
636 void ldv_spin_lock_i_lock_of_inode(void)
637 {
638 /* LDV_COMMENT_ASSERT Spin 'i_lock_of_inode' must be unlocked */
639 ldv_assert(ldv_spin_i_lock_of_inode == 1);
640 /* LDV_COMMENT_CHANGE_STATE Lock spin 'i_lock_of_inode' */
641 ldv_spin_i_lock_of_inode = 2;
642 }
643
644 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_i_lock_of_inode') Check that spin 'i_lock_of_inode' was locked and unlock it */
645 void ldv_spin_unlock_i_lock_of_inode(void)
646 {
647 /* LDV_COMMENT_ASSERT Spin 'i_lock_of_inode' must be locked */
648 ldv_assert(ldv_spin_i_lock_of_inode == 2);
649 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'i_lock_of_inode' */
650 ldv_spin_i_lock_of_inode = 1;
651 }
652
653 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_i_lock_of_inode') Check that spin 'i_lock_of_inode' was not locked and nondeterministically lock it. Return 0 on failure */
654 int ldv_spin_trylock_i_lock_of_inode(void)
655 {
656 int is_spin_held_by_another_thread;
657
658 /* LDV_COMMENT_ASSERT It may be an error if spin 'i_lock_of_inode' is locked at this point */
659 ldv_assert(ldv_spin_i_lock_of_inode == 1);
660
661 /* LDV_COMMENT_OTHER Construct nondetermined result */
662 is_spin_held_by_another_thread = ldv_undef_int();
663
664 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'i_lock_of_inode' */
665 if (is_spin_held_by_another_thread)
666 {
667 /* LDV_COMMENT_RETURN Spin 'i_lock_of_inode' could not be locked (it is held by another thread). Finish with failure */
668 return 0;
669 }
670 else
671 {
672 /* LDV_COMMENT_CHANGE_STATE Lock spin 'i_lock_of_inode' */
673 ldv_spin_i_lock_of_inode = 2;
674 /* LDV_COMMENT_RETURN Finish with success */
675 return 1;
676 }
677 }
678
679 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_i_lock_of_inode') The same process cannot both lock spin 'i_lock_of_inode' and wait until it is unlocked */
680 void ldv_spin_unlock_wait_i_lock_of_inode(void)
681 {
682 /* LDV_COMMENT_ASSERT Spin 'i_lock_of_inode' must not be locked by the current process */
683 ldv_assert(ldv_spin_i_lock_of_inode == 1);
684 }
685
686 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_i_lock_of_inode') Check whether spin 'i_lock_of_inode' was locked */
687 int ldv_spin_is_locked_i_lock_of_inode(void)
688 {
689 int is_spin_held_by_another_thread;
690
691 /* LDV_COMMENT_OTHER Construct nondetermined result */
692 is_spin_held_by_another_thread = ldv_undef_int();
693
694 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'i_lock_of_inode' is locked */
695 if(ldv_spin_i_lock_of_inode == 1 && !is_spin_held_by_another_thread)
696 {
697 /* LDV_COMMENT_RETURN Spin 'i_lock_of_inode' was unlocked */
698 return 0;
699 }
700 else
701 {
702 /* LDV_COMMENT_RETURN Spin 'i_lock_of_inode' was locked */
703 return 1;
704 }
705 }
706
707 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_i_lock_of_inode') Check whether spin 'i_lock_of_inode' can be locked */
708 int ldv_spin_can_lock_i_lock_of_inode(void)
709 {
710 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
711 return !ldv_spin_is_locked_i_lock_of_inode();
712 }
713
714 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_i_lock_of_inode') Check whether spin 'i_lock_of_inode' is contended */
715 int ldv_spin_is_contended_i_lock_of_inode(void)
716 {
717 int is_spin_contended;
718
719 /* LDV_COMMENT_OTHER Construct nondetermined result */
720 is_spin_contended = ldv_undef_int();
721
722 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'i_lock_of_inode' is contended */
723 if (is_spin_contended)
724 {
725 /* LDV_COMMENT_RETURN Spin 'i_lock_of_inode' is contended */
726 return 1;
727 }
728 else
729 {
730 /* LDV_COMMENT_RETURN Spin 'i_lock_of_inode' isn't contended */
731 return 0;
732 }
733 }
734
735 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_i_lock_of_inode') Lock spin 'i_lock_of_inode' if atomic decrement result is zero */
736 int ldv_atomic_dec_and_lock_i_lock_of_inode(void)
737 {
738 int atomic_value_after_dec;
739
740 /* LDV_COMMENT_ASSERT Spin 'i_lock_of_inode' must be unlocked (since we may lock it in this function) */
741 ldv_assert(ldv_spin_i_lock_of_inode == 1);
742
743 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
744 atomic_value_after_dec = ldv_undef_int();
745
746 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
747 if (atomic_value_after_dec == 0)
748 {
749 /* LDV_COMMENT_CHANGE_STATE Lock spin 'i_lock_of_inode', as atomic has decremented to zero */
750 ldv_spin_i_lock_of_inode = 2;
751 /* LDV_COMMENT_RETURN Return 1 with locked spin 'i_lock_of_inode' */
752 return 1;
753 }
754
755 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'i_lock_of_inode' */
756 return 0;
757 }
758 static int ldv_spin_lock;
759
760 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_lock') Check that spin 'lock' was not locked and lock it */
761 void ldv_spin_lock_lock(void)
762 {
763 /* LDV_COMMENT_ASSERT Spin 'lock' must be unlocked */
764 ldv_assert(ldv_spin_lock == 1);
765 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock' */
766 ldv_spin_lock = 2;
767 }
768
769 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_lock') Check that spin 'lock' was locked and unlock it */
770 void ldv_spin_unlock_lock(void)
771 {
772 /* LDV_COMMENT_ASSERT Spin 'lock' must be locked */
773 ldv_assert(ldv_spin_lock == 2);
774 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'lock' */
775 ldv_spin_lock = 1;
776 }
777
778 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_lock') Check that spin 'lock' was not locked and nondeterministically lock it. Return 0 on failure */
779 int ldv_spin_trylock_lock(void)
780 {
781 int is_spin_held_by_another_thread;
782
783 /* LDV_COMMENT_ASSERT It may be an error if spin 'lock' is locked at this point */
784 ldv_assert(ldv_spin_lock == 1);
785
786 /* LDV_COMMENT_OTHER Construct nondetermined result */
787 is_spin_held_by_another_thread = ldv_undef_int();
788
789 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'lock' */
790 if (is_spin_held_by_another_thread)
791 {
792 /* LDV_COMMENT_RETURN Spin 'lock' could not be locked (it is held by another thread). Finish with failure */
793 return 0;
794 }
795 else
796 {
797 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock' */
798 ldv_spin_lock = 2;
799 /* LDV_COMMENT_RETURN Finish with success */
800 return 1;
801 }
802 }
803
804 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_lock') The same process cannot both lock spin 'lock' and wait until it is unlocked */
805 void ldv_spin_unlock_wait_lock(void)
806 {
807 /* LDV_COMMENT_ASSERT Spin 'lock' must not be locked by the current process */
808 ldv_assert(ldv_spin_lock == 1);
809 }
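/* Illustrative sketch (deliberately erroneous, to show what the assert above rules out):
 * spin_unlock_wait() only waits and never acquires, so waiting on a spin this process
 * already holds could never terminate; the model reports it immediately: */
void ldv_example_unlock_wait_self_deadlock(void)
{
        ldv_spin_lock_lock();        /* state becomes 2 */
        ldv_spin_unlock_wait_lock(); /* assert(state == 1) fails and is reported as an error */
}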
810
811 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_lock') Check whether spin 'lock' was locked */
812 int ldv_spin_is_locked_lock(void)
813 {
814 int is_spin_held_by_another_thread;
815
816 /* LDV_COMMENT_OTHER Construct nondetermined result */
817 is_spin_held_by_another_thread = ldv_undef_int();
818
819 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'lock' is locked */
820 if(ldv_spin_lock == 1 && !is_spin_held_by_another_thread)
821 {
822 /* LDV_COMMENT_RETURN Spin 'lock' was unlocked */
823 return 0;
824 }
825 else
826 {
827 /* LDV_COMMENT_RETURN Spin 'lock' was locked */
828 return 1;
829 }
830 }
831
832 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_lock') Check whether spin 'lock' can be locked */
833 int ldv_spin_can_lock_lock(void)
834 {
835 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
836 return !ldv_spin_is_locked_lock();
837 }
838
839 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_lock') Check whether spin 'lock' is contended */
840 int ldv_spin_is_contended_lock(void)
841 {
842 int is_spin_contended;
843
844 /* LDV_COMMENT_OTHER Construct nondetermined result */
845 is_spin_contended = ldv_undef_int();
846
847 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'lock' is contended */
848 if (is_spin_contended)
849 {
850 /* LDV_COMMENT_RETURN Spin 'lock' is contended */
851 return 1;
852 }
853 else
854 {
855 /* LDV_COMMENT_RETURN Spin 'lock' isn't contended */
856 return 0;
857 }
858 }
859
860 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_lock') Lock spin 'lock' if atomic decrement result is zero */
861 int ldv_atomic_dec_and_lock_lock(void)
862 {
863 int atomic_value_after_dec;
864
865 /* LDV_COMMENT_ASSERT Spin 'lock' must be unlocked (since we may lock it in this function) */
866 ldv_assert(ldv_spin_lock == 1);
867
868 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
869 atomic_value_after_dec = ldv_undef_int();
870
871 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
872 if (atomic_value_after_dec == 0)
873 {
874 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock', as atomic has decremented to zero */
875 ldv_spin_lock = 2;
876 /* LDV_COMMENT_RETURN Return 1 with locked spin 'lock' */
877 return 1;
878 }
879
880 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'lock' */
881 return 0;
882 }
883 static int ldv_spin_lock_of_NOT_ARG_SIGN;
884
885 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_lock_of_NOT_ARG_SIGN') Check that spin 'lock_of_NOT_ARG_SIGN' was not locked and lock it */
886 void ldv_spin_lock_lock_of_NOT_ARG_SIGN(void)
887 {
888 /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be unlocked */
889 ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
890 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_NOT_ARG_SIGN' */
891 ldv_spin_lock_of_NOT_ARG_SIGN = 2;
892 }
893
894 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_lock_of_NOT_ARG_SIGN') Check that spin 'lock_of_NOT_ARG_SIGN' was locked and unlock it */
895 void ldv_spin_unlock_lock_of_NOT_ARG_SIGN(void)
896 {
897 /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be locked */
898 ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 2);
899 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'lock_of_NOT_ARG_SIGN' */
900 ldv_spin_lock_of_NOT_ARG_SIGN = 1;
901 }
902
903 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_lock_of_NOT_ARG_SIGN') Check that spin 'lock_of_NOT_ARG_SIGN' was not locked and nondeterministically lock it. Return 0 on failure */
904 int ldv_spin_trylock_lock_of_NOT_ARG_SIGN(void)
905 {
906 int is_spin_held_by_another_thread;
907
908 /* LDV_COMMENT_ASSERT It may be an error if spin 'lock_of_NOT_ARG_SIGN' is locked at this point */
909 ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
910
911 /* LDV_COMMENT_OTHER Construct nondetermined result */
912 is_spin_held_by_another_thread = ldv_undef_int();
913
914 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'lock_of_NOT_ARG_SIGN' */
915 if (is_spin_held_by_another_thread)
916 {
917 /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' could not be locked (it is held by another thread). Finish with failure */
918 return 0;
919 }
920 else
921 {
922 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_NOT_ARG_SIGN' */
923 ldv_spin_lock_of_NOT_ARG_SIGN = 2;
924 /* LDV_COMMENT_RETURN Finish with success */
925 return 1;
926 }
927 }
928
929 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN') The same process cannot both lock spin 'lock_of_NOT_ARG_SIGN' and wait until it is unlocked */
930 void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN(void)
931 {
932 /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must not be locked by the current process */
933 ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
934 }
935
936 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_lock_of_NOT_ARG_SIGN') Check whether spin 'lock_of_NOT_ARG_SIGN' was locked */
937 int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN(void)
938 {
939 int is_spin_held_by_another_thread;
940
941 /* LDV_COMMENT_OTHER Construct nondetermined result */
942 is_spin_held_by_another_thread = ldv_undef_int();
943
944 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'lock_of_NOT_ARG_SIGN' is locked */
945 if(ldv_spin_lock_of_NOT_ARG_SIGN == 1 && !is_spin_held_by_another_thread)
946 {
947 /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' was unlocked */
948 return 0;
949 }
950 else
951 {
952 /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' was locked */
953 return 1;
954 }
955 }
956
957 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_lock_of_NOT_ARG_SIGN') Check whether spin 'lock_of_NOT_ARG_SIGN' can be locked */
958 int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN(void)
959 {
960 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
961 return !ldv_spin_is_locked_lock_of_NOT_ARG_SIGN();
962 }
963
964 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_lock_of_NOT_ARG_SIGN') Check whether spin 'lock_of_NOT_ARG_SIGN' is contended */
965 int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN(void)
966 {
967 int is_spin_contended;
968
969 /* LDV_COMMENT_OTHER Construct nondetermined result */
970 is_spin_contended = ldv_undef_int();
971
972 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'lock_of_NOT_ARG_SIGN' is contended */
973 if (is_spin_contended)
974 {
975 /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' is contended */
976 return 1;
977 }
978 else
979 {
980 /* LDV_COMMENT_RETURN Spin 'lock_of_NOT_ARG_SIGN' isn't contended */
981 return 0;
982 }
983 }
984
985 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN') Lock spin 'lock_of_NOT_ARG_SIGN' if atomic decrement result is zero */
986 int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN(void)
987 {
988 int atomic_value_after_dec;
989
990 /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be unlocked (since we may lock it in this function) */
991 ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
992
993 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
994 atomic_value_after_dec = ldv_undef_int();
995
996 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
997 if (atomic_value_after_dec == 0)
998 {
999 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_NOT_ARG_SIGN', as atomic has decremented to zero */
1000 ldv_spin_lock_of_NOT_ARG_SIGN = 2;
1001 /* LDV_COMMENT_RETURN Return 1 with locked spin 'lock_of_NOT_ARG_SIGN' */
1002 return 1;
1003 }
1004
1005 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'lock_of_NOT_ARG_SIGN' */
1006 return 0;
1007 }
1008 static int ldv_spin_lock_of_ali_ircc_cb;
1009
1010 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_lock_of_ali_ircc_cb') Check that spin 'lock_of_ali_ircc_cb' was not locked and lock it */
1011 void ldv_spin_lock_lock_of_ali_ircc_cb(void)
1012 {
1013 /* LDV_COMMENT_ASSERT Spin 'lock_of_ali_ircc_cb' must be unlocked */
1014 ldv_assert(ldv_spin_lock_of_ali_ircc_cb == 1);
1015 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_ali_ircc_cb' */
1016 ldv_spin_lock_of_ali_ircc_cb = 2;
1017 }
1018
1019 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_lock_of_ali_ircc_cb') Check that spin 'lock_of_ali_ircc_cb' was locked and unlock it */
1020 void ldv_spin_unlock_lock_of_ali_ircc_cb(void)
1021 {
1022 /* LDV_COMMENT_ASSERT Spin 'lock_of_ali_ircc_cb' must be locked */
1023 ldv_assert(ldv_spin_lock_of_ali_ircc_cb == 2);
1024 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'lock_of_ali_ircc_cb' */
1025 ldv_spin_lock_of_ali_ircc_cb = 1;
1026 }
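/* Illustrative sketch; the mapping below is an assumption about how the ali-ircc driver
 * is instrumented, not something shown in this trace: 'lock_of_ali_ircc_cb' stands for
 * the per-instance self->lock, so a guarded section in the driver is assumed to become
 * roughly: */
void ldv_example_ali_ircc_cb_section(void)
{
        ldv_spin_lock_lock_of_ali_ircc_cb();   /* in the driver: spin_lock_irqsave(&self->lock, flags) */
        /* ... access the ali_ircc_cb fields ... */
        ldv_spin_unlock_lock_of_ali_ircc_cb(); /* in the driver: spin_unlock_irqrestore(&self->lock, flags) */
}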
1027
1028 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_lock_of_ali_ircc_cb') Check that spin 'lock_of_ali_ircc_cb' was not locked and nondeterministically lock it. Return 0 on failure */
1029 int ldv_spin_trylock_lock_of_ali_ircc_cb(void)
1030 {
1031 int is_spin_held_by_another_thread;
1032
1033 /* LDV_COMMENT_ASSERT It may be an error if spin 'lock_of_ali_ircc_cb' is locked at this point */
1034 ldv_assert(ldv_spin_lock_of_ali_ircc_cb == 1);
1035
1036 /* LDV_COMMENT_OTHER Construct nondetermined result */
1037 is_spin_held_by_another_thread = ldv_undef_int();
1038
1039 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'lock_of_ali_ircc_cb' */
1040 if (is_spin_held_by_another_thread)
1041 {
1042 /* LDV_COMMENT_RETURN Spin 'lock_of_ali_ircc_cb' could not be locked (it is held by another thread). Finish with failure */
1043 return 0;
1044 }
1045 else
1046 {
1047 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_ali_ircc_cb' */
1048 ldv_spin_lock_of_ali_ircc_cb = 2;
1049 /* LDV_COMMENT_RETURN Finish with success */
1050 return 1;
1051 }
1052 }
1053
1054 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_lock_of_ali_ircc_cb') The same process cannot both lock spin 'lock_of_ali_ircc_cb' and wait until it is unlocked */
1055 void ldv_spin_unlock_wait_lock_of_ali_ircc_cb(void)
1056 {
1057 /* LDV_COMMENT_ASSERT Spin 'lock_of_ali_ircc_cb' must not be locked by the current process */
1058 ldv_assert(ldv_spin_lock_of_ali_ircc_cb == 1);
1059 }
1060
1061 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_lock_of_ali_ircc_cb') Check whether spin 'lock_of_ali_ircc_cb' was locked */
1062 int ldv_spin_is_locked_lock_of_ali_ircc_cb(void)
1063 {
1064 int is_spin_held_by_another_thread;
1065
1066 /* LDV_COMMENT_OTHER Construct nondetermined result */
1067 is_spin_held_by_another_thread = ldv_undef_int();
1068
1069 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'lock_of_ali_ircc_cb' is locked */
1070 if(ldv_spin_lock_of_ali_ircc_cb == 1 && !is_spin_held_by_another_thread)
1071 {
1072 /* LDV_COMMENT_RETURN Spin 'lock_of_ali_ircc_cb' was unlocked */
1073 return 0;
1074 }
1075 else
1076 {
1077 /* LDV_COMMENT_RETURN Spin 'lock_of_ali_ircc_cb' was locked */
1078 return 1;
1079 }
1080 }
1081
1082 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_lock_of_ali_ircc_cb') Check whether spin 'lock_of_ali_ircc_cb' can be locked */
1083 int ldv_spin_can_lock_lock_of_ali_ircc_cb(void)
1084 {
1085 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
1086 return !ldv_spin_is_locked_lock_of_ali_ircc_cb();
1087 }
1088
1089 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_lock_of_ali_ircc_cb') Check whether spin 'lock_of_ali_ircc_cb' is contended */
1090 int ldv_spin_is_contended_lock_of_ali_ircc_cb(void)
1091 {
1092 int is_spin_contended;
1093
1094 /* LDV_COMMENT_OTHER Construct nondetermined result */
1095 is_spin_contended = ldv_undef_int();
1096
1097 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'lock_of_ali_ircc_cb' is contended */
1098 if (is_spin_contended)
1099 {
1100 /* LDV_COMMENT_RETURN Spin 'lock_of_ali_ircc_cb' is contended */
1101 return 1;
1102 }
1103 else
1104 {
1105 /* LDV_COMMENT_RETURN Spin 'lock_of_ali_ircc_cb' isn't contended */
1106 return 0;
1107 }
1108 }
1109
1110 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_lock_of_ali_ircc_cb') Lock spin 'lock_of_ali_ircc_cb' if atomic decrement result is zero */
1111 int ldv_atomic_dec_and_lock_lock_of_ali_ircc_cb(void)
1112 {
1113 int atomic_value_after_dec;
1114
1115 /* LDV_COMMENT_ASSERT Spin 'lock_of_ali_ircc_cb' must be unlocked (since we may lock it in this function) */
1116 ldv_assert(ldv_spin_lock_of_ali_ircc_cb == 1);
1117
1118 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
1119 atomic_value_after_dec = ldv_undef_int();
1120
1121 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
1122 if (atomic_value_after_dec == 0)
1123 {
1124 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lock_of_ali_ircc_cb', as atomic has decremented to zero */
1125 ldv_spin_lock_of_ali_ircc_cb = 2;
1126 /* LDV_COMMENT_RETURN Return 1 with locked spin 'lock_of_ali_ircc_cb' */
1127 return 1;
1128 }
1129
1130 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'lock_of_ali_ircc_cb' */
1131 return 0;
1132 }
1133 static int ldv_spin_lru_lock_of_netns_frags;
1134
1135 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_lru_lock_of_netns_frags') Check that spin 'lru_lock_of_netns_frags' was not locked and lock it */
1136 void ldv_spin_lock_lru_lock_of_netns_frags(void)
1137 {
1138 /* LDV_COMMENT_ASSERT Spin 'lru_lock_of_netns_frags' must be unlocked */
1139 ldv_assert(ldv_spin_lru_lock_of_netns_frags == 1);
1140 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lru_lock_of_netns_frags' */
1141 ldv_spin_lru_lock_of_netns_frags = 2;
1142 }
1143
1144 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_lru_lock_of_netns_frags') Check that spin 'lru_lock_of_netns_frags' was locked and unlock it */
1145 void ldv_spin_unlock_lru_lock_of_netns_frags(void)
1146 {
1147 /* LDV_COMMENT_ASSERT Spin 'lru_lock_of_netns_frags' must be locked */
1148 ldv_assert(ldv_spin_lru_lock_of_netns_frags == 2);
1149 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'lru_lock_of_netns_frags' */
1150 ldv_spin_lru_lock_of_netns_frags = 1;
1151 }
1152
1153 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_lru_lock_of_netns_frags') Check that spin 'lru_lock_of_netns_frags' was not locked and nondeterministically lock it. Return 0 on failure */
1154 int ldv_spin_trylock_lru_lock_of_netns_frags(void)
1155 {
1156 int is_spin_held_by_another_thread;
1157
1158 /* LDV_COMMENT_ASSERT It may be an error if spin 'lru_lock_of_netns_frags' is locked at this point */
1159 ldv_assert(ldv_spin_lru_lock_of_netns_frags == 1);
1160
1161 /* LDV_COMMENT_OTHER Construct nondetermined result */
1162 is_spin_held_by_another_thread = ldv_undef_int();
1163
1164 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'lru_lock_of_netns_frags' */
1165 if (is_spin_held_by_another_thread)
1166 {
1167 /* LDV_COMMENT_RETURN Spin 'lru_lock_of_netns_frags' could not be locked (it is held by another thread). Finish with failure */
1168 return 0;
1169 }
1170 else
1171 {
1172 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lru_lock_of_netns_frags' */
1173 ldv_spin_lru_lock_of_netns_frags = 2;
1174 /* LDV_COMMENT_RETURN Finish with success */
1175 return 1;
1176 }
1177 }
1178
1179 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_lru_lock_of_netns_frags') The same process cannot both lock spin 'lru_lock_of_netns_frags' and wait until it is unlocked */
1180 void ldv_spin_unlock_wait_lru_lock_of_netns_frags(void)
1181 {
1182 /* LDV_COMMENT_ASSERT Spin 'lru_lock_of_netns_frags' must not be locked by the current process */
1183 ldv_assert(ldv_spin_lru_lock_of_netns_frags == 1);
1184 }
1185
1186 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_lru_lock_of_netns_frags') Check whether spin 'lru_lock_of_netns_frags' was locked */
1187 int ldv_spin_is_locked_lru_lock_of_netns_frags(void)
1188 {
1189 int is_spin_held_by_another_thread;
1190
1191 /* LDV_COMMENT_OTHER Construct nondetermined result */
1192 is_spin_held_by_another_thread = ldv_undef_int();
1193
1194 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'lru_lock_of_netns_frags' is locked */
1195 if(ldv_spin_lru_lock_of_netns_frags == 1 && !is_spin_held_by_another_thread)
1196 {
1197 /* LDV_COMMENT_RETURN Spin 'lru_lock_of_netns_frags' was unlocked */
1198 return 0;
1199 }
1200 else
1201 {
1202 /* LDV_COMMENT_RETURN Spin 'lru_lock_of_netns_frags' was locked */
1203 return 1;
1204 }
1205 }
1206
1207 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_lru_lock_of_netns_frags') Check whether spin 'lru_lock_of_netns_frags' can be locked */
1208 int ldv_spin_can_lock_lru_lock_of_netns_frags(void)
1209 {
1210 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
1211 return !ldv_spin_is_locked_lru_lock_of_netns_frags();
1212 }
1213
1214 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_lru_lock_of_netns_frags') Check whether spin 'lru_lock_of_netns_frags' is contended */
1215 int ldv_spin_is_contended_lru_lock_of_netns_frags(void)
1216 {
1217 int is_spin_contended;
1218
1219 /* LDV_COMMENT_OTHER Construct nondetermined result */
1220 is_spin_contended = ldv_undef_int();
1221
1222 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'lru_lock_of_netns_frags' is contended */
1223 if (is_spin_contended)
1224 {
1225 /* LDV_COMMENT_RETURN Spin 'lru_lock_of_netns_frags' is contended */
1226 return 1;
1227 }
1228 else
1229 {
1230 /* LDV_COMMENT_RETURN Spin 'lru_lock_of_netns_frags' isn't contended */
1231 return 0;
1232 }
1233 }
1234
1235 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_lru_lock_of_netns_frags') Lock spin 'lru_lock_of_netns_frags' if atomic decrement result is zero */
1236 int ldv_atomic_dec_and_lock_lru_lock_of_netns_frags(void)
1237 {
1238 int atomic_value_after_dec;
1239
1240 /* LDV_COMMENT_ASSERT Spin 'lru_lock_of_netns_frags' must be unlocked (since we may lock it in this function) */
1241 ldv_assert(ldv_spin_lru_lock_of_netns_frags == 1);
1242
1243 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
1244 atomic_value_after_dec = ldv_undef_int();
1245
1246 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
1247 if (atomic_value_after_dec == 0)
1248 {
1249 /* LDV_COMMENT_CHANGE_STATE Lock spin 'lru_lock_of_netns_frags', as atomic has decremented to zero */
1250 ldv_spin_lru_lock_of_netns_frags = 2;
1251 /* LDV_COMMENT_RETURN Return 1 with locked spin 'lru_lock_of_netns_frags' */
1252 return 1;
1253 }
1254
1255 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'lru_lock_of_netns_frags' */
1256 return 0;
1257 }
1258 static int ldv_spin_node_size_lock_of_pglist_data;
1259
1260 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_node_size_lock_of_pglist_data') Check that spin 'node_size_lock_of_pglist_data' was not locked and lock it */
1261 void ldv_spin_lock_node_size_lock_of_pglist_data(void)
1262 {
1263 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be unlocked */
1264 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
1265 /* LDV_COMMENT_CHANGE_STATE Lock spin 'node_size_lock_of_pglist_data' */
1266 ldv_spin_node_size_lock_of_pglist_data = 2;
1267 }
1268
1269 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_node_size_lock_of_pglist_data') Check that spin 'node_size_lock_of_pglist_data' was locked and unlock it */
1270 void ldv_spin_unlock_node_size_lock_of_pglist_data(void)
1271 {
1272 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be locked */
1273 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 2);
1274 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'node_size_lock_of_pglist_data' */
1275 ldv_spin_node_size_lock_of_pglist_data = 1;
1276 }
1277
1278 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_node_size_lock_of_pglist_data') Check that spin 'node_size_lock_of_pglist_data' was not locked and nondeterministically lock it. Return 0 on failure */
1279 int ldv_spin_trylock_node_size_lock_of_pglist_data(void)
1280 {
1281 int is_spin_held_by_another_thread;
1282
1283 /* LDV_COMMENT_ASSERT It may be an error if spin 'node_size_lock_of_pglist_data' is locked at this point */
1284 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
1285
1286 /* LDV_COMMENT_OTHER Construct nondetermined result */
1287 is_spin_held_by_another_thread = ldv_undef_int();
1288
1289 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'node_size_lock_of_pglist_data' */
1290 if (is_spin_held_by_another_thread)
1291 {
1292 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' could not be locked (it is held by another thread). Finish with failure */
1293 return 0;
1294 }
1295 else
1296 {
1297 /* LDV_COMMENT_CHANGE_STATE Lock spin 'node_size_lock_of_pglist_data' */
1298 ldv_spin_node_size_lock_of_pglist_data = 2;
1299 /* LDV_COMMENT_RETURN Finish with success */
1300 return 1;
1301 }
1302 }
1303
1304 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_node_size_lock_of_pglist_data') The same process cannot both lock spin 'node_size_lock_of_pglist_data' and wait until it is unlocked */
1305 void ldv_spin_unlock_wait_node_size_lock_of_pglist_data(void)
1306 {
1307 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must not be locked by the current process */
1308 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
1309 }
1310
1311 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_node_size_lock_of_pglist_data') Check whether spin 'node_size_lock_of_pglist_data' was locked */
1312 int ldv_spin_is_locked_node_size_lock_of_pglist_data(void)
1313 {
1314 int is_spin_held_by_another_thread;
1315
1316 /* LDV_COMMENT_OTHER Construct nondetermined result */
1317 is_spin_held_by_another_thread = ldv_undef_int();
1318
1319 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'node_size_lock_of_pglist_data' is locked */
1320 if(ldv_spin_node_size_lock_of_pglist_data == 1 && !is_spin_held_by_another_thread)
1321 {
1322 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' was unlocked */
1323 return 0;
1324 }
1325 else
1326 {
1327 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' was locked */
1328 return 1;
1329 }
1330 }
1331
1332 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_node_size_lock_of_pglist_data') Check whether spin 'node_size_lock_of_pglist_data' can be locked */
1333 int ldv_spin_can_lock_node_size_lock_of_pglist_data(void)
1334 {
1335 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
1336 return !ldv_spin_is_locked_node_size_lock_of_pglist_data();
1337 }
1338
1339 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_node_size_lock_of_pglist_data') Check whether spin 'node_size_lock_of_pglist_data' is contended */
1340 int ldv_spin_is_contended_node_size_lock_of_pglist_data(void)
1341 {
1342 int is_spin_contended;
1343
1344 /* LDV_COMMENT_OTHER Construct nondetermined result */
1345 is_spin_contended = ldv_undef_int();
1346
1347 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'node_size_lock_of_pglist_data' is contended */
1348 if (is_spin_contended)
1349 {
1350 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' is contended */
1351 return 1;
1352 }
1353 else
1354 {
1355 /* LDV_COMMENT_RETURN Spin 'node_size_lock_of_pglist_data' isn't contended */
1356 return 0;
1357 }
1358 }
1359
1360 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data') Lock spin 'node_size_lock_of_pglist_data' if atomic decrement result is zero */
1361 int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data(void)
1362 {
1363 int atomic_value_after_dec;
1364
1365 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be unlocked (since we may lock it in this function) */
1366 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
1367
1368 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
1369 atomic_value_after_dec = ldv_undef_int();
1370
1371 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
1372 if (atomic_value_after_dec == 0)
1373 {
1374 /* LDV_COMMENT_CHANGE_STATE Lock spin 'node_size_lock_of_pglist_data', as atomic has decremented to zero */
1375 ldv_spin_node_size_lock_of_pglist_data = 2;
1376 /* LDV_COMMENT_RETURN Return 1 with locked spin 'node_size_lock_of_pglist_data' */
1377 return 1;
1378 }
1379
1380 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'node_size_lock_of_pglist_data' */
1381 return 0;
1382 }
1383 static int ldv_spin_ptl;
1384
1385 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_ptl') Check that spin 'ptl' was not locked and lock it */
1386 void ldv_spin_lock_ptl(void)
1387 {
1388 /* LDV_COMMENT_ASSERT Spin 'ptl' must be unlocked */
1389 ldv_assert(ldv_spin_ptl == 1);
1390 /* LDV_COMMENT_CHANGE_STATE Lock spin 'ptl' */
1391 ldv_spin_ptl = 2;
1392 }
1393
1394 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_ptl') Check that spin 'ptl' was locked and unlock it */
1395 void ldv_spin_unlock_ptl(void)
1396 {
1397 /* LDV_COMMENT_ASSERT Spin 'ptl' must be locked */
1398 ldv_assert(ldv_spin_ptl == 2);
1399 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'ptl' */
1400 ldv_spin_ptl = 1;
1401 }
1402
1403 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_ptl') Check that spin 'ptl' was not locked and nondeterministically lock it. Return 0 on failure */
1404 int ldv_spin_trylock_ptl(void)
1405 {
1406 int is_spin_held_by_another_thread;
1407
1408 /* LDV_COMMENT_ASSERT It may be an error if spin 'ptl' is locked at this point */
1409 ldv_assert(ldv_spin_ptl == 1);
1410
1411 /* LDV_COMMENT_OTHER Construct nondetermined result */
1412 is_spin_held_by_another_thread = ldv_undef_int();
1413
1414 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'ptl' */
1415 if (is_spin_held_by_another_thread)
1416 {
1417 /* LDV_COMMENT_RETURN Spin 'ptl' could not be locked (it is held by another thread). Finish with failure */
1418 return 0;
1419 }
1420 else
1421 {
1422 /* LDV_COMMENT_CHANGE_STATE Lock spin 'ptl' */
1423 ldv_spin_ptl = 2;
1424 /* LDV_COMMENT_RETURN Finish with success */
1425 return 1;
1426 }
1427 }
1428
1429 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_ptl') The same process cannot both lock spin 'ptl' and wait until it is unlocked */
1430 void ldv_spin_unlock_wait_ptl(void)
1431 {
1432 /* LDV_COMMENT_ASSERT Spin 'ptl' must not be locked by the current process */
1433 ldv_assert(ldv_spin_ptl == 1);
1434 }
1435
1436 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_ptl') Check whether spin 'ptl' was locked */
1437 int ldv_spin_is_locked_ptl(void)
1438 {
1439 int is_spin_held_by_another_thread;
1440
1441 /* LDV_COMMENT_OTHER Construct nondetermined result */
1442 is_spin_held_by_another_thread = ldv_undef_int();
1443
1444 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'ptl' is locked */
1445 if(ldv_spin_ptl == 1 && !is_spin_held_by_another_thread)
1446 {
1447 /* LDV_COMMENT_RETURN Spin 'ptl' was unlocked */
1448 return 0;
1449 }
1450 else
1451 {
1452 /* LDV_COMMENT_RETURN Spin 'ptl' was locked */
1453 return 1;
1454 }
1455 }
1456
1457 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_ptl') Check whether spin 'ptl' can be locked */
1458 int ldv_spin_can_lock_ptl(void)
1459 {
1460 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
1461 return !ldv_spin_is_locked_ptl();
1462 }
1463
1464 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_ptl') Check whether spin 'ptl' is contended */
1465 int ldv_spin_is_contended_ptl(void)
1466 {
1467 int is_spin_contended;
1468
1469 /* LDV_COMMENT_OTHER Construct nondetermined result */
1470 is_spin_contended = ldv_undef_int();
1471
1472 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'ptl' is contended */
1473 if (is_spin_contended)
1474 {
1475 /* LDV_COMMENT_RETURN Spin 'ptl' is contended */
1476 return 1;
1477 }
1478 else
1479 {
1480 /* LDV_COMMENT_RETURN Spin 'ptl' isn't contended */
1481 return 0;
1482 }
1483 }
1484
1485 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_ptl') Lock spin 'ptl' if atomic decrement result is zero */
1486 int ldv_atomic_dec_and_lock_ptl(void)
1487 {
1488 int atomic_value_after_dec;
1489
1490 /* LDV_COMMENT_ASSERT Spin 'ptl' must be unlocked (since we may lock it in this function) */
1491 ldv_assert(ldv_spin_ptl == 1);
1492
1493 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
1494 atomic_value_after_dec = ldv_undef_int();
1495
1496 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
1497 if (atomic_value_after_dec == 0)
1498 {
1499 /* LDV_COMMENT_CHANGE_STATE Lock spin 'ptl', as atomic has decremented to zero */
1500 ldv_spin_ptl = 2;
1501 /* LDV_COMMENT_RETURN Return 1 with locked spin 'ptl' */
1502 return 1;
1503 }
1504
1505 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'ptl' */
1506 return 0;
1507 }
1508 static int ldv_spin_siglock_of_sighand_struct;
1509
1510 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_siglock_of_sighand_struct') Check that spin 'siglock_of_sighand_struct' was not locked and lock it */
1511 void ldv_spin_lock_siglock_of_sighand_struct(void)
1512 {
1513 /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must be unlocked */
1514 ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
1515 /* LDV_COMMENT_CHANGE_STATE Lock spin 'siglock_of_sighand_struct' */
1516 ldv_spin_siglock_of_sighand_struct = 2;
1517 }
1518
1519 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_siglock_of_sighand_struct') Check that spin 'siglock_of_sighand_struct' was locked and unlock it */
1520 void ldv_spin_unlock_siglock_of_sighand_struct(void)
1521 {
1522 /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must be locked */
1523 ldv_assert(ldv_spin_siglock_of_sighand_struct == 2);
1524 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'siglock_of_sighand_struct' */
1525 ldv_spin_siglock_of_sighand_struct = 1;
1526 }
1527
1528 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_siglock_of_sighand_struct') Check that spin 'siglock_of_sighand_struct' was not locked and nondeterministically lock it. Return 0 on failure */
1529 int ldv_spin_trylock_siglock_of_sighand_struct(void)
1530 {
1531 int is_spin_held_by_another_thread;
1532
1533 /* LDV_COMMENT_ASSERT It may be an error if spin 'siglock_of_sighand_struct' is locked at this point */
1534 ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
1535
1536 /* LDV_COMMENT_OTHER Construct nondetermined result */
1537 is_spin_held_by_another_thread = ldv_undef_int();
1538
1539 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'siglock_of_sighand_struct' */
1540 if (is_spin_held_by_another_thread)
1541 {
1542 /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' could not be locked (it is held by another thread). Finish with failure */
1543 return 0;
1544 }
1545 else
1546 {
1547 /* LDV_COMMENT_CHANGE_STATE Lock spin 'siglock_of_sighand_struct' */
1548 ldv_spin_siglock_of_sighand_struct = 2;
1549 /* LDV_COMMENT_RETURN Finish with success */
1550 return 1;
1551 }
1552 }
1553
1554 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_siglock_of_sighand_struct') The same process cannot both lock spin 'siglock_of_sighand_struct' and wait until it is unlocked */
1555 void ldv_spin_unlock_wait_siglock_of_sighand_struct(void)
1556 {
1557 /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must not be locked by the current process */
1558 ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
1559 }
1560
1561 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_siglock_of_sighand_struct') Check whether spin 'siglock_of_sighand_struct' was locked */
1562 int ldv_spin_is_locked_siglock_of_sighand_struct(void)
1563 {
1564 int is_spin_held_by_another_thread;
1565
1566 /* LDV_COMMENT_OTHER Construct nondetermined result */
1567 is_spin_held_by_another_thread = ldv_undef_int();
1568
1569 /* LDV_COMMENT_ASSERT Nondeterministically decide whether spin 'siglock_of_sighand_struct' is locked */
1570 if(ldv_spin_siglock_of_sighand_struct == 1 && !is_spin_held_by_another_thread)
1571 {
1572 /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' was unlocked */
1573 return 0;
1574 }
1575 else
1576 {
1577 /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' was locked */
1578 return 1;
1579 }
1580 }
1581
1582 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_siglock_of_sighand_struct') Check whether spin 'siglock_of_sighand_struct' can be locked */
1583 int ldv_spin_can_lock_siglock_of_sighand_struct(void)
1584 {
1585 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
1586 return !ldv_spin_is_locked_siglock_of_sighand_struct();
1587 }
1588
1589 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_siglock_of_sighand_struct') Check whether spin 'siglock_of_sighand_struct' is contended */
1590 int ldv_spin_is_contended_siglock_of_sighand_struct(void)
1591 {
1592 int is_spin_contended;
1593
1594 /* LDV_COMMENT_OTHER Construct nondetermined result */
1595 is_spin_contended = ldv_undef_int();
1596
1597 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'siglock_of_sighand_struct' is contended */
1598 if(is_spin_contended)
1599 {
1600 /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' is contended */
1601 return 1;
1602 }
1603 else
1604 {
1605 /* LDV_COMMENT_RETURN Spin 'siglock_of_sighand_struct' isn't contended */
1606 return 0;
1607 }
1608 }
1609
1610 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_siglock_of_sighand_struct') Lock spin 'siglock_of_sighand_struct' if atomic decrement result is zero */
1611 int ldv_atomic_dec_and_lock_siglock_of_sighand_struct(void)
1612 {
1613 int atomic_value_after_dec;
1614
1615 /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must be unlocked (since we may lock it in this function) */
1616 ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
1617
1618 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
1619 atomic_value_after_dec = ldv_undef_int();
1620
1621 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
1622 if (atomic_value_after_dec == 0)
1623 {
1624 /* LDV_COMMENT_CHANGE_STATE Lock spin 'siglock_of_sighand_struct', as atomic has decremented to zero */
1625 ldv_spin_siglock_of_sighand_struct = 2;
1626 /* LDV_COMMENT_RETURN Return 1 with locked spin 'siglock_of_sighand_struct' */
1627 return 1;
1628 }
1629
1630 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'siglock_of_sighand_struct' */
1631 return 0;
1632 }
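A minimal sketch of how a driver call site is wired to the model above (the caller below is hypothetical; only the ldv_* calls come from the trace): the real spin_lock()/spin_unlock() on the sighand siglock are replaced by the model functions, so the verifier tracks the 1 (unlocked) / 2 (locked) state instead of the real spinlock.

/* Hypothetical instrumented call site; in the original driver this would be
 * spin_lock(&sighand->siglock) / spin_unlock(&sighand->siglock). */
static void example_instrumented_siglock_user(void)
{
	/* replaces spin_lock(): asserts state == 1, then sets state = 2 */
	ldv_spin_lock_siglock_of_sighand_struct();

	/* ... critical section of the original driver code ... */

	/* replaces spin_unlock(): asserts state == 2, then sets state = 1 */
	ldv_spin_unlock_siglock_of_sighand_struct();
}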
1633 static int ldv_spin_tx_global_lock_of_net_device;
1634
1635 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_tx_global_lock_of_net_device') Check that spin 'tx_global_lock_of_net_device' was not locked and lock it */
1636 void ldv_spin_lock_tx_global_lock_of_net_device(void)
1637 {
1638 /* LDV_COMMENT_ASSERT Spin 'tx_global_lock_of_net_device' must be unlocked */
1639 ldv_assert(ldv_spin_tx_global_lock_of_net_device == 1);
1640 /* LDV_COMMENT_CHANGE_STATE Lock spin 'tx_global_lock_of_net_device' */
1641 ldv_spin_tx_global_lock_of_net_device = 2;
1642 }
1643
1644 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_tx_global_lock_of_net_device') Check that spin 'tx_global_lock_of_net_device' was locked and unlock it */
1645 void ldv_spin_unlock_tx_global_lock_of_net_device(void)
1646 {
1647 /* LDV_COMMENT_ASSERT Spin 'tx_global_lock_of_net_device' must be locked */
1648 ldv_assert(ldv_spin_tx_global_lock_of_net_device == 2);
1649 /* LDV_COMMENT_CHANGE_STATE Unlock spin 'tx_global_lock_of_net_device' */
1650 ldv_spin_tx_global_lock_of_net_device = 1;
1651 }
1652
1653 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_tx_global_lock_of_net_device') Check that spin 'tx_global_lock_of_net_device' was not locked and nondeterministically lock it. Return 0 on failure */
1654 int ldv_spin_trylock_tx_global_lock_of_net_device(void)
1655 {
1656 int is_spin_held_by_another_thread;
1657
1658 /* LDV_COMMENT_ASSERT It may be an error if spin 'tx_global_lock_of_net_device' is locked at this point */
1659 ldv_assert(ldv_spin_tx_global_lock_of_net_device == 1);
1660
1661 /* LDV_COMMENT_OTHER Construct nondetermined result */
1662 is_spin_held_by_another_thread = ldv_undef_int();
1663
1664 /* LDV_COMMENT_ASSERT Nondeterministically lock spin 'tx_global_lock_of_net_device' */
1665 if (is_spin_held_by_another_thread)
1666 {
1667 /* LDV_COMMENT_RETURN Spin 'tx_global_lock_of_net_device' could not be locked. Finish with failure */
1668 return 0;
1669 }
1670 else
1671 {
1672 /* LDV_COMMENT_CHANGE_STATE Lock spin 'tx_global_lock_of_net_device' */
1673 ldv_spin_tx_global_lock_of_net_device = 2;
1674 /* LDV_COMMENT_RETURN Finish with success */
1675 return 1;
1676 }
1677 }
1678
1679 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait_tx_global_lock_of_net_device') The same process cannot both lock spin 'tx_global_lock_of_net_device' and wait until it is unlocked */
1680 void ldv_spin_unlock_wait_tx_global_lock_of_net_device(void)
1681 {
1682 /* LDV_COMMENT_ASSERT Spin 'tx_global_lock_of_net_device' must not be locked by a current process */
1683 ldv_assert(ldv_spin_tx_global_lock_of_net_device == 1);
1684 }
1685
1686 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked_tx_global_lock_of_net_device') Check whether spin 'tx_global_lock_of_net_device' was locked */
1687 int ldv_spin_is_locked_tx_global_lock_of_net_device(void)
1688 {
1689 int is_spin_held_by_another_thread;
1690
1691 /* LDV_COMMENT_OTHER Construct nondetermined result */
1692 is_spin_held_by_another_thread = ldv_undef_int();
1693
1694 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'tx_global_lock_of_net_device' was locked */
1695 if(ldv_spin_tx_global_lock_of_net_device == 1 && !is_spin_held_by_another_thread)
1696 {
1697 /* LDV_COMMENT_RETURN Spin 'tx_global_lock_of_net_device' was unlocked */
1698 return 0;
1699 }
1700 else
1701 {
1702 /* LDV_COMMENT_RETURN Spin 'tx_global_lock_of_net_device' was locked */
1703 return 1;
1704 }
1705 }
1706
1707 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock_tx_global_lock_of_net_device') Check whether spin 'tx_global_lock_of_net_device' can be locked */
1708 int ldv_spin_can_lock_tx_global_lock_of_net_device(void)
1709 {
1710 /* LDV_COMMENT_RETURN Inverse function for spin_is_locked() */
1711 return !ldv_spin_is_locked_tx_global_lock_of_net_device();
1712 }
1713
1714 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended_tx_global_lock_of_net_device') Check whether spin 'tx_global_lock_of_net_device' is contended */
1715 int ldv_spin_is_contended_tx_global_lock_of_net_device(void)
1716 {
1717 int is_spin_contended;
1718
1719 /* LDV_COMMENT_OTHER Construct nondetermined result */
1720 is_spin_contended = ldv_undef_int();
1721
1722 /* LDV_COMMENT_ASSERT Nondeterministically understand whether spin 'tx_global_lock_of_net_device' is contended */
1723 if(is_spin_contended)
1724 {
1725 /* LDV_COMMENT_RETURN Spin 'tx_global_lock_of_net_device' is contended */
1726 return 1;
1727 }
1728 else
1729 {
1730 /* LDV_COMMENT_RETURN Spin 'tx_global_lock_of_net_device' isn't contended */
1731 return 0;
1732 }
1733 }
1734
1735 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock_tx_global_lock_of_net_device') Lock spin 'tx_global_lock_of_net_device' if atomic decrement result is zero */
1736 int ldv_atomic_dec_and_lock_tx_global_lock_of_net_device(void)
1737 {
1738 int atomic_value_after_dec;
1739
1740 /* LDV_COMMENT_ASSERT Spin 'tx_global_lock_of_net_device' must be unlocked (since we may lock it in this function) */
1741 ldv_assert(ldv_spin_tx_global_lock_of_net_device == 1);
1742
1743 /* LDV_COMMENT_OTHER Assign the result of atomic decrement */
1744 atomic_value_after_dec = ldv_undef_int();
1745
1746 /* LDV_COMMENT_ASSERT Check if atomic decrement returns zero */
1747 if (atomic_value_after_dec == 0)
1748 {
1749 /* LDV_COMMENT_CHANGE_STATE Lock spin 'tx_global_lock_of_net_device', as atomic has decremented to zero */
1750 ldv_spin_tx_global_lock_of_net_device = 2;
1751 /* LDV_COMMENT_RETURN Return 1 with locked spin 'tx_global_lock_of_net_device' */
1752 return 1;
1753 }
1754
1755 /* LDV_COMMENT_RETURN Atomic decrement is still not zero, return 0 without locking spin 'tx_global_lock_of_net_device' */
1756 return 0;
1757 }
1758
1759 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_initialize') Make all spins unlocked at the beginning */
1760 void ldv_initialize(void)
1761 {
1762 /* LDV_COMMENT_CHANGE_STATE Make spin 'NOT_ARG_SIGN' unlocked at the beginning */
1763 ldv_spin_NOT_ARG_SIGN = 1;
1764 /* LDV_COMMENT_CHANGE_STATE Make spin '_xmit_lock_of_netdev_queue' unlocked at the beginning */
1765 ldv_spin__xmit_lock_of_netdev_queue = 1;
1766 /* LDV_COMMENT_CHANGE_STATE Make spin 'addr_list_lock_of_net_device' unlocked at the beginning */
1767 ldv_spin_addr_list_lock_of_net_device = 1;
1768 /* LDV_COMMENT_CHANGE_STATE Make spin 'alloc_lock_of_task_struct' unlocked at the beginning */
1769 ldv_spin_alloc_lock_of_task_struct = 1;
1770 /* LDV_COMMENT_CHANGE_STATE Make spin 'dma_spin_lock' unlocked at the beginning */
1771 ldv_spin_dma_spin_lock = 1;
1772 /* LDV_COMMENT_CHANGE_STATE Make spin 'i_lock_of_inode' unlocked at the beginning */
1773 ldv_spin_i_lock_of_inode = 1;
1774 /* LDV_COMMENT_CHANGE_STATE Make spin 'lock' unlocked at the beginning */
1775 ldv_spin_lock = 1;
1776 /* LDV_COMMENT_CHANGE_STATE Make spin 'lock_of_NOT_ARG_SIGN' unlocked at the beginning */
1777 ldv_spin_lock_of_NOT_ARG_SIGN = 1;
1778 /* LDV_COMMENT_CHANGE_STATE Make spin 'lock_of_ali_ircc_cb' unlocked at the beginning */
1779 ldv_spin_lock_of_ali_ircc_cb = 1;
1780 /* LDV_COMMENT_CHANGE_STATE Make spin 'lru_lock_of_netns_frags' unlocked at the beginning */
1781 ldv_spin_lru_lock_of_netns_frags = 1;
1782 /* LDV_COMMENT_CHANGE_STATE Make spin 'node_size_lock_of_pglist_data' unlocked at the beginning */
1783 ldv_spin_node_size_lock_of_pglist_data = 1;
1784 /* LDV_COMMENT_CHANGE_STATE Make spin 'ptl' unlocked at the beginning */
1785 ldv_spin_ptl = 1;
1786 /* LDV_COMMENT_CHANGE_STATE Make spin 'siglock_of_sighand_struct' unlocked at the beginning */
1787 ldv_spin_siglock_of_sighand_struct = 1;
1788 /* LDV_COMMENT_CHANGE_STATE Make spin 'tx_global_lock_of_net_device' unlocked at the beginning */
1789 ldv_spin_tx_global_lock_of_net_device = 1;
1790 }
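For illustration, a hedged example of the property being checked (the caller below is hypothetical; ldv_initialize() and the lock model are the functions defined above): after ldv_initialize() every spin state is 1, so locking the same spin twice violates the ldv_assert() in the lock model, and ldv_assert() reaches the error label in ldv_error(), which is what the verifier reports.

/* Hypothetical driver path that would be flagged as a double lock. */
static void example_double_lock_is_flagged(void)
{
	ldv_initialize();                              /* all spins set to 1 (unlocked) */
	ldv_spin_lock_siglock_of_sighand_struct();     /* state 1 -> 2, assertion holds */
	ldv_spin_lock_siglock_of_sighand_struct();     /* ldv_assert(state == 1) fails -> ldv_error() */
}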
1791
1792 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that all spins are unlocked at the end */
1793 void ldv_check_final_state(void)
1794 {
1795 /* LDV_COMMENT_ASSERT Spin 'NOT_ARG_SIGN' must be unlocked at the end */
1796 ldv_assert(ldv_spin_NOT_ARG_SIGN == 1);
1797 /* LDV_COMMENT_ASSERT Spin '_xmit_lock_of_netdev_queue' must be unlocked at the end */
1798 ldv_assert(ldv_spin__xmit_lock_of_netdev_queue == 1);
1799 /* LDV_COMMENT_ASSERT Spin 'addr_list_lock_of_net_device' must be unlocked at the end */
1800 ldv_assert(ldv_spin_addr_list_lock_of_net_device == 1);
1801 /* LDV_COMMENT_ASSERT Spin 'alloc_lock_of_task_struct' must be unlocked at the end */
1802 ldv_assert(ldv_spin_alloc_lock_of_task_struct == 1);
1803 /* LDV_COMMENT_ASSERT Spin 'dma_spin_lock' must be unlocked at the end */
1804 ldv_assert(ldv_spin_dma_spin_lock == 1);
1805 /* LDV_COMMENT_ASSERT Spin 'i_lock_of_inode' must be unlocked at the end */
1806 ldv_assert(ldv_spin_i_lock_of_inode == 1);
1807 /* LDV_COMMENT_ASSERT Spin 'lock' must be unlocked at the end */
1808 ldv_assert(ldv_spin_lock == 1);
1809 /* LDV_COMMENT_ASSERT Spin 'lock_of_NOT_ARG_SIGN' must be unlocked at the end */
1810 ldv_assert(ldv_spin_lock_of_NOT_ARG_SIGN == 1);
1811 /* LDV_COMMENT_ASSERT Spin 'lock_of_ali_ircc_cb' must be unlocked at the end */
1812 ldv_assert(ldv_spin_lock_of_ali_ircc_cb == 1);
1813 /* LDV_COMMENT_ASSERT Spin 'lru_lock_of_netns_frags' must be unlocked at the end */
1814 ldv_assert(ldv_spin_lru_lock_of_netns_frags == 1);
1815 /* LDV_COMMENT_ASSERT Spin 'node_size_lock_of_pglist_data' must be unlocked at the end */
1816 ldv_assert(ldv_spin_node_size_lock_of_pglist_data == 1);
1817 /* LDV_COMMENT_ASSERT Spin 'ptl' must be unlocked at the end */
1818 ldv_assert(ldv_spin_ptl == 1);
1819 /* LDV_COMMENT_ASSERT Spin 'siglock_of_sighand_struct' must be unlocked at the end */
1820 ldv_assert(ldv_spin_siglock_of_sighand_struct == 1);
1821 /* LDV_COMMENT_ASSERT Spin 'tx_global_lock_of_net_device' must be unlocked at the end */
1822 ldv_assert(ldv_spin_tx_global_lock_of_net_device == 1);
1823 }
1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
8 /* The error label wrapper. It is used because some static verifiers (like
9 BLAST) don't accept multiple error labels in a program. */
10 static inline void ldv_error(void)
11 {
12 LDV_ERROR: goto LDV_ERROR;
13 }
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
19 /* Infinite loop that causes verifiers to skip such paths. */
20 static inline void ldv_stop(void) {
21 LDV_STOP: goto LDV_STOP;
22 }
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 /* Return nondeterministic negative integer number. */
29 static inline int ldv_undef_int_negative(void)
30 {
31 int ret = ldv_undef_int();
32
33 ldv_assume(ret < 0);
34
35 return ret;
36 }
37 /* Return nondeterministic nonpositive integer number. */
38 static inline int ldv_undef_int_nonpositive(void)
39 {
40 int ret = ldv_undef_int();
41
42 ldv_assume(ret <= 0);
43
44 return ret;
45 }
46
47 /* Add explicit model for the __builtin_expect GCC function. Without the model a
48 return value will be treated as nondetermined by verifiers. */
49 long __builtin_expect(long exp, long c)
50 {
51 return exp;
52 }
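The effect of this model is that branch hints keep their real condition instead of becoming nondetermined. As a hedged illustration (these are restatements of the usual kernel likely()/unlikely() definitions, not part of this header):

/* Illustrative only: the kernel's branch-prediction hints expand to __builtin_expect(),
 * which the model above reduces to the plain condition value. */
#define example_likely(x)   __builtin_expect(!!(x), 1)
#define example_unlikely(x) __builtin_expect(!!(x), 0)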
53
54 /* This function causes the program to exit abnormally. GCC implements this
55 function by using a target-dependent mechanism (such as intentionally executing
56 an illegal instruction) or by calling abort. The mechanism used may vary from
57 release to release so you should not rely on any particular implementation.
58 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
59 void __builtin_trap(void)
60 {
61 ldv_assert(0);
62 }
63
64 /* The constant is for simulating an error of ldv_undef_ptr() function. */
65 #define LDV_PTR_MAX 2012
66
67 #endif /* _LDV_RCV_H_ */
1 #ifndef _LINUX_DMA_MAPPING_H
2 #define _LINUX_DMA_MAPPING_H
3
4 #include <linux/string.h>
5 #include <linux/device.h>
6 #include <linux/err.h>
7 #include <linux/dma-attrs.h>
8 #include <linux/dma-direction.h>
9 #include <linux/scatterlist.h>
10
11 /*
12 * A dma_addr_t can hold any valid DMA or bus address for the platform.
13 * It can be given to a device to use as a DMA source or target. A CPU cannot
14 * reference a dma_addr_t directly because there may be translation between
15 * its physical address space and the bus address space.
16 */
17 struct dma_map_ops {
18 void* (*alloc)(struct device *dev, size_t size,
19 dma_addr_t *dma_handle, gfp_t gfp,
20 struct dma_attrs *attrs);
21 void (*free)(struct device *dev, size_t size,
22 void *vaddr, dma_addr_t dma_handle,
23 struct dma_attrs *attrs);
24 int (*mmap)(struct device *, struct vm_area_struct *,
25 void *, dma_addr_t, size_t, struct dma_attrs *attrs);
26
27 int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
28 dma_addr_t, size_t, struct dma_attrs *attrs);
29
30 dma_addr_t (*map_page)(struct device *dev, struct page *page,
31 unsigned long offset, size_t size,
32 enum dma_data_direction dir,
33 struct dma_attrs *attrs);
34 void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
35 size_t size, enum dma_data_direction dir,
36 struct dma_attrs *attrs);
37 int (*map_sg)(struct device *dev, struct scatterlist *sg,
38 int nents, enum dma_data_direction dir,
39 struct dma_attrs *attrs);
40 void (*unmap_sg)(struct device *dev,
41 struct scatterlist *sg, int nents,
42 enum dma_data_direction dir,
43 struct dma_attrs *attrs);
44 void (*sync_single_for_cpu)(struct device *dev,
45 dma_addr_t dma_handle, size_t size,
46 enum dma_data_direction dir);
47 void (*sync_single_for_device)(struct device *dev,
48 dma_addr_t dma_handle, size_t size,
49 enum dma_data_direction dir);
50 void (*sync_sg_for_cpu)(struct device *dev,
51 struct scatterlist *sg, int nents,
52 enum dma_data_direction dir);
53 void (*sync_sg_for_device)(struct device *dev,
54 struct scatterlist *sg, int nents,
55 enum dma_data_direction dir);
56 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
57 int (*dma_supported)(struct device *dev, u64 mask);
58 int (*set_dma_mask)(struct device *dev, u64 mask);
59 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
60 u64 (*get_required_mask)(struct device *dev);
61 #endif
62 int is_phys;
63 };
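The ops table above is what an architecture backend plugs in behind the generic DMA API. A rough, hedged sketch of how a driver exercises that API through the standard helpers (the buffer and error handling below are illustrative):

/* Hypothetical streaming mapping: map a CPU buffer for device reads, then unmap it. */
static int example_map_for_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;			/* mapping failed, nothing to undo */

	/* ... hand 'handle' to the hardware and wait for the transfer to finish ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}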
64
65 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
66
67 #define DMA_MASK_NONE 0x0ULL
68
69 static inline int valid_dma_direction(int dma_direction)
70 {
71 return ((dma_direction == DMA_BIDIRECTIONAL) ||
72 (dma_direction == DMA_TO_DEVICE) ||
73 (dma_direction == DMA_FROM_DEVICE));
74 }
75
76 static inline int is_device_dma_capable(struct device *dev)
77 {
78 return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
79 }
80
81 #ifdef CONFIG_HAS_DMA
82 #include <asm/dma-mapping.h>
83 #else
84 #include <asm-generic/dma-mapping-broken.h>
85 #endif
86
87 static inline u64 dma_get_mask(struct device *dev)
88 {
89 if (dev && dev->dma_mask && *dev->dma_mask)
90 return *dev->dma_mask;
91 return DMA_BIT_MASK(32);
92 }
93
94 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
95 int dma_set_coherent_mask(struct device *dev, u64 mask);
96 #else
97 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
98 {
99 if (!dma_supported(dev, mask))
100 return -EIO;
101 dev->coherent_dma_mask = mask;
102 return 0;
103 }
104 #endif
105
106 /*
107 * Set both the DMA mask and the coherent DMA mask to the same thing.
108 * Note that we don't check the return value from dma_set_coherent_mask()
109 * as the DMA API guarantees that the coherent DMA mask can be set to
110 * the same or smaller than the streaming DMA mask.
111 */
112 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
113 {
114 int rc = dma_set_mask(dev, mask);
115 if (rc == 0)
116 dma_set_coherent_mask(dev, mask);
117 return rc;
118 }
119
120 /*
121 * Similar to the above, except it deals with the case where the device
122 * does not have dev->dma_mask appropriately set up.
123 */
124 static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
125 {
126 dev->dma_mask = &dev->coherent_dma_mask;
127 return dma_set_mask_and_coherent(dev, mask);
128 }
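A hedged example of the usual probe-time pattern built on the helper above (the 64-bit-then-32-bit fallback is a common driver idiom, not something mandated by this header):

/* Hypothetical probe-time mask negotiation: prefer 64-bit DMA, fall back to 32-bit. */
static int example_set_dma_masks(struct device *dev)
{
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) == 0)
		return 0;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}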
129
130 extern u64 dma_get_required_mask(struct device *dev);
131
132 #ifndef set_arch_dma_coherent_ops
133 static inline int set_arch_dma_coherent_ops(struct device *dev)
134 {
135 return 0;
136 }
137 #endif
138
139 static inline unsigned int dma_get_max_seg_size(struct device *dev)
140 {
141 return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
142 }
143
144 static inline unsigned int dma_set_max_seg_size(struct device *dev,
145 unsigned int size)
146 {
147 if (dev->dma_parms) {
148 dev->dma_parms->max_segment_size = size;
149 return 0;
150 } else
151 return -EIO;
152 }
153
154 static inline unsigned long dma_get_seg_boundary(struct device *dev)
155 {
156 return dev->dma_parms ?
157 dev->dma_parms->segment_boundary_mask : 0xffffffff;
158 }
159
160 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
161 {
162 if (dev->dma_parms) {
163 dev->dma_parms->segment_boundary_mask = mask;
164 return 0;
165 } else
166 return -EIO;
167 }
168
169 #ifndef dma_max_pfn
170 static inline unsigned long dma_max_pfn(struct device *dev)
171 {
172 return *dev->dma_mask >> PAGE_SHIFT;
173 }
174 #endif
175
176 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
177 dma_addr_t *dma_handle, gfp_t flag)
178 {
179 void *ret = dma_alloc_coherent(dev, size, dma_handle,
180 flag | __GFP_ZERO);
181 return ret;
182 }
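A hedged usage sketch for the helper above (the function name is illustrative): allocate a zeroed, DMA-coherent buffer shared with the device, and release it later with dma_free_coherent().

/* Hypothetical coherent ring allocation. */
static void *example_alloc_ring(struct device *dev, size_t size, dma_addr_t *handle)
{
	void *ring = dma_zalloc_coherent(dev, size, handle, GFP_KERNEL);

	/* ... on teardown: dma_free_coherent(dev, size, ring, *handle); ... */
	return ring;
}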
183
184 #ifdef CONFIG_HAS_DMA
185 static inline int dma_get_cache_alignment(void)
186 {
187 #ifdef ARCH_DMA_MINALIGN
188 return ARCH_DMA_MINALIGN;
189 #endif
190 return 1;
191 }
192 #endif
193
194 /* flags for the coherent memory api */
195 #define DMA_MEMORY_MAP 0x01
196 #define DMA_MEMORY_IO 0x02
197 #define DMA_MEMORY_INCLUDES_CHILDREN 0x04
198 #define DMA_MEMORY_EXCLUSIVE 0x08
199
200 #ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
201 static inline int
202 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
203 dma_addr_t device_addr, size_t size, int flags)
204 {
205 return 0;
206 }
207
208 static inline void
209 dma_release_declared_memory(struct device *dev)
210 {
211 }
212
213 static inline void *
214 dma_mark_declared_memory_occupied(struct device *dev,
215 dma_addr_t device_addr, size_t size)
216 {
217 return ERR_PTR(-EBUSY);
218 }
219 #endif
220
221 /*
222 * Managed DMA API
223 */
224 extern void *dmam_alloc_coherent(struct device *dev, size_t size,
225 dma_addr_t *dma_handle, gfp_t gfp);
226 extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
227 dma_addr_t dma_handle);
228 extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
229 dma_addr_t *dma_handle, gfp_t gfp);
230 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
231 dma_addr_t dma_handle);
232 #ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
233 extern int dmam_declare_coherent_memory(struct device *dev,
234 phys_addr_t phys_addr,
235 dma_addr_t device_addr, size_t size,
236 int flags);
237 extern void dmam_release_declared_memory(struct device *dev);
238 #else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
239 static inline int dmam_declare_coherent_memory(struct device *dev,
240 phys_addr_t phys_addr, dma_addr_t device_addr,
241 size_t size, gfp_t gfp)
242 {
243 return 0;
244 }
245
246 static inline void dmam_release_declared_memory(struct device *dev)
247 {
248 }
249 #endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
250
251 #ifndef CONFIG_HAVE_DMA_ATTRS
252 struct dma_attrs;
253
254 #define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
255 dma_map_single(dev, cpu_addr, size, dir)
256
257 #define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
258 dma_unmap_single(dev, dma_addr, size, dir)
259
260 #define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
261 dma_map_sg(dev, sgl, nents, dir)
262
263 #define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
264 dma_unmap_sg(dev, sgl, nents, dir)
265
266 #endif /* CONFIG_HAVE_DMA_ATTRS */
267
268 #ifdef CONFIG_NEED_DMA_MAP_STATE
269 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
270 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
271 #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
272 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
273 #define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
274 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
275 #else
276 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
277 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
278 #define dma_unmap_addr(PTR, ADDR_NAME) (0)
279 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
280 #define dma_unmap_len(PTR, LEN_NAME) (0)
281 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
282 #endif
283
284 #endif
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Definitions for the Interfaces handler.
7 *
8 * Version: @(#)dev.h 1.0.10 08/12/93
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
14 * Alan Cox, <alan@lxorguk.ukuu.org.uk>
15 * Bjorn Ekwall. <bj0rn@blox.se>
16 * Pekka Riikonen <priikone@poseidon.pspt.fi>
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 * Moved to /usr/include/linux for NET3
24 */
25 #ifndef _LINUX_NETDEVICE_H
26 #define _LINUX_NETDEVICE_H
27
28 #include <linux/pm_qos.h>
29 #include <linux/timer.h>
30 #include <linux/bug.h>
31 #include <linux/delay.h>
32 #include <linux/atomic.h>
33 #include <asm/cache.h>
34 #include <asm/byteorder.h>
35
36 #include <linux/percpu.h>
37 #include <linux/rculist.h>
38 #include <linux/dmaengine.h>
39 #include <linux/workqueue.h>
40 #include <linux/dynamic_queue_limits.h>
41
42 #include <linux/ethtool.h>
43 #include <net/net_namespace.h>
44 #include <net/dsa.h>
45 #ifdef CONFIG_DCB
46 #include <net/dcbnl.h>
47 #endif
48 #include <net/netprio_cgroup.h>
49
50 #include <linux/netdev_features.h>
51 #include <linux/neighbour.h>
52 #include <uapi/linux/netdevice.h>
53
54 struct netpoll_info;
55 struct device;
56 struct phy_device;
57 /* 802.11 specific */
58 struct wireless_dev;
59
60 void netdev_set_default_ethtool_ops(struct net_device *dev,
61 const struct ethtool_ops *ops);
62
63 /* Backlog congestion levels */
64 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
65 #define NET_RX_DROP 1 /* packet dropped */
66
67 /*
68 * Transmit return codes: transmit return codes originate from three different
69 * namespaces:
70 *
71 * - qdisc return codes
72 * - driver transmit return codes
73 * - errno values
74 *
75 * Drivers are allowed to return any one of those in their hard_start_xmit()
76 * function. Real network devices commonly used with qdiscs should only return
77 * the driver transmit return codes though - when qdiscs are used, the actual
78 * transmission happens asynchronously, so the value is not propagated to
79 * higher layers. Virtual network devices transmit synchronously, in this case
80 * the driver transmit return codes are consumed by dev_queue_xmit(), all
81 * others are propagated to higher layers.
82 */
83
84 /* qdisc ->enqueue() return codes. */
85 #define NET_XMIT_SUCCESS 0x00
86 #define NET_XMIT_DROP 0x01 /* skb dropped */
87 #define NET_XMIT_CN 0x02 /* congestion notification */
88 #define NET_XMIT_POLICED 0x03 /* skb is shot by police */
89 #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
90
91 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
92 * indicates that the device will soon be dropping packets, or already drops
93 * some packets of the same priority; prompting us to send less aggressively. */
94 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
95 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
96
97 /* Driver transmit return codes */
98 #define NETDEV_TX_MASK 0xf0
99
100 enum netdev_tx {
101 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
102 NETDEV_TX_OK = 0x00, /* driver took care of packet */
103 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
104 NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
105 };
106 typedef enum netdev_tx netdev_tx_t;
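To connect these codes to the comment above, here is a hedged sketch of a physical-device transmit handler (the ring helpers are hypothetical placeholders): it either hands the skb to the hardware and returns NETDEV_TX_OK, or leaves the skb untouched, stops its queue and returns NETDEV_TX_BUSY.

/* Hypothetical helpers standing in for driver-specific ring management. */
static bool example_tx_ring_full(struct net_device *dev);
static void example_queue_to_hw(struct net_device *dev, struct sk_buff *skb);

/* Hypothetical driver transmit handler illustrating the return-code contract. */
static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (example_tx_ring_full(dev)) {
		netif_stop_queue(dev);		/* back-pressure the stack */
		return NETDEV_TX_BUSY;		/* skb not consumed, will be retried */
	}

	example_queue_to_hw(dev, skb);		/* hardware now owns the skb */
	return NETDEV_TX_OK;
}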
107
108 /*
109 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
110 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
111 */
112 static inline bool dev_xmit_complete(int rc)
113 {
114 /*
115 * Positive cases with an skb consumed by a driver:
116 * - successful transmission (rc == NETDEV_TX_OK)
117 * - error while transmitting (rc < 0)
118 * - error while queueing to a different device (rc & NET_XMIT_MASK)
119 */
120 if (likely(rc < NET_XMIT_MASK))
121 return true;
122
123 return false;
124 }
125
126 /*
127 * Compute the worst case header length according to the protocols
128 * used.
129 */
130
131 #if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
132 # if defined(CONFIG_MAC80211_MESH)
133 # define LL_MAX_HEADER 128
134 # else
135 # define LL_MAX_HEADER 96
136 # endif
137 #else
138 # define LL_MAX_HEADER 32
139 #endif
140
141 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
142 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
143 #define MAX_HEADER LL_MAX_HEADER
144 #else
145 #define MAX_HEADER (LL_MAX_HEADER + 48)
146 #endif
147
148 /*
149 * Old network device statistics. Fields are native words
150 * (unsigned long) so they can be read and written atomically.
151 */
152
153 struct net_device_stats {
154 unsigned long rx_packets;
155 unsigned long tx_packets;
156 unsigned long rx_bytes;
157 unsigned long tx_bytes;
158 unsigned long rx_errors;
159 unsigned long tx_errors;
160 unsigned long rx_dropped;
161 unsigned long tx_dropped;
162 unsigned long multicast;
163 unsigned long collisions;
164 unsigned long rx_length_errors;
165 unsigned long rx_over_errors;
166 unsigned long rx_crc_errors;
167 unsigned long rx_frame_errors;
168 unsigned long rx_fifo_errors;
169 unsigned long rx_missed_errors;
170 unsigned long tx_aborted_errors;
171 unsigned long tx_carrier_errors;
172 unsigned long tx_fifo_errors;
173 unsigned long tx_heartbeat_errors;
174 unsigned long tx_window_errors;
175 unsigned long rx_compressed;
176 unsigned long tx_compressed;
177 };
178
179
180 #include <linux/cache.h>
181 #include <linux/skbuff.h>
182
183 #ifdef CONFIG_RPS
184 #include <linux/static_key.h>
185 extern struct static_key rps_needed;
186 #endif
187
188 struct neighbour;
189 struct neigh_parms;
190 struct sk_buff;
191
192 struct netdev_hw_addr {
193 struct list_head list;
194 unsigned char addr[MAX_ADDR_LEN];
195 unsigned char type;
196 #define NETDEV_HW_ADDR_T_LAN 1
197 #define NETDEV_HW_ADDR_T_SAN 2
198 #define NETDEV_HW_ADDR_T_SLAVE 3
199 #define NETDEV_HW_ADDR_T_UNICAST 4
200 #define NETDEV_HW_ADDR_T_MULTICAST 5
201 bool global_use;
202 int sync_cnt;
203 int refcount;
204 int synced;
205 struct rcu_head rcu_head;
206 };
207
208 struct netdev_hw_addr_list {
209 struct list_head list;
210 int count;
211 };
212
213 #define netdev_hw_addr_list_count(l) ((l)->count)
214 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
215 #define netdev_hw_addr_list_for_each(ha, l) \
216 list_for_each_entry(ha, &(l)->list, list)
217
218 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
219 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
220 #define netdev_for_each_uc_addr(ha, dev) \
221 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
222
223 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
224 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
225 #define netdev_for_each_mc_addr(ha, dev) \
226 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
227
228 struct hh_cache {
229 u16 hh_len;
230 u16 __pad;
231 seqlock_t hh_lock;
232
233 /* cached hardware header; allow for machine alignment needs. */
234 #define HH_DATA_MOD 16
235 #define HH_DATA_OFF(__len) \
236 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
237 #define HH_DATA_ALIGN(__len) \
238 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
239 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
240 };
241
242 /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
243 * Alternative is:
244 * dev->hard_header_len ? (dev->hard_header_len +
245 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
246 *
247 * We could use other alignment values, but we must maintain the
248 * relationship HH alignment <= LL alignment.
249 */
250 #define LL_RESERVED_SPACE(dev) \
251 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
252 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
253 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
254
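A hedged example of what the reservation macro above is for (the allocation pattern is the common kernel idiom; the function name is illustrative): reserve link-layer headroom before building a packet.

/* Hypothetical allocation that leaves headroom for the hardware header. */
static struct sk_buff *example_alloc_with_headroom(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);

	if (skb)
		skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* headroom for the link-layer header */
	return skb;
}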
255 struct header_ops {
256 int (*create) (struct sk_buff *skb, struct net_device *dev,
257 unsigned short type, const void *daddr,
258 const void *saddr, unsigned int len);
259 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
260 int (*rebuild)(struct sk_buff *skb);
261 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
262 void (*cache_update)(struct hh_cache *hh,
263 const struct net_device *dev,
264 const unsigned char *haddr);
265 };
266
267 /* These flag bits are private to the generic network queueing
268 * layer, they may not be explicitly referenced by any other
269 * code.
270 */
271
272 enum netdev_state_t {
273 __LINK_STATE_START,
274 __LINK_STATE_PRESENT,
275 __LINK_STATE_NOCARRIER,
276 __LINK_STATE_LINKWATCH_PENDING,
277 __LINK_STATE_DORMANT,
278 };
279
280
281 /*
282 * This structure holds at boot time configured netdevice settings. They
283 * are then used in the device probing.
284 */
285 struct netdev_boot_setup {
286 char name[IFNAMSIZ];
287 struct ifmap map;
288 };
289 #define NETDEV_BOOT_SETUP_MAX 8
290
291 int __init netdev_boot_setup(char *str);
292
293 /*
294 * Structure for NAPI scheduling similar to tasklet but with weighting
295 */
296 struct napi_struct {
297 /* The poll_list must only be managed by the entity which
298 * changes the state of the NAPI_STATE_SCHED bit. This means
299 * whoever atomically sets that bit can add this napi_struct
300 * to the per-cpu poll_list, and whoever clears that bit
301 * can remove from the list right before clearing the bit.
302 */
303 struct list_head poll_list;
304
305 unsigned long state;
306 int weight;
307 unsigned int gro_count;
308 int (*poll)(struct napi_struct *, int);
309 #ifdef CONFIG_NETPOLL
310 spinlock_t poll_lock;
311 int poll_owner;
312 #endif
313 struct net_device *dev;
314 struct sk_buff *gro_list;
315 struct sk_buff *skb;
316 struct list_head dev_list;
317 struct hlist_node napi_hash_node;
318 unsigned int napi_id;
319 };
320
321 enum {
322 NAPI_STATE_SCHED, /* Poll is scheduled */
323 NAPI_STATE_DISABLE, /* Disable pending */
324 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
325 NAPI_STATE_HASHED, /* In NAPI hash */
326 };
327
328 enum gro_result {
329 GRO_MERGED,
330 GRO_MERGED_FREE,
331 GRO_HELD,
332 GRO_NORMAL,
333 GRO_DROP,
334 };
335 typedef enum gro_result gro_result_t;
336
337 /*
338 * enum rx_handler_result - Possible return values for rx_handlers.
339 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
340 * further.
341 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
342 * case skb->dev was changed by rx_handler.
343 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
344 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
345 *
346 * rx_handlers are functions called from inside __netif_receive_skb(), to do
347 * special processing of the skb, prior to delivery to protocol handlers.
348 *
349 * Currently, a net_device can only have a single rx_handler registered. Trying
350 * to register a second rx_handler will return -EBUSY.
351 *
352 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
353 * To unregister a rx_handler on a net_device, use
354 * netdev_rx_handler_unregister().
355 *
356 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
357 * do with the skb.
358 *
359 * If the rx_handler consumed the skb in some way, it should return
360 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
361 * the skb to be delivered in some other way.
362 *
363 * If the rx_handler changed skb->dev, to divert the skb to another
364 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
365 * new device will be called if it exists.
366 *
367 * If the rx_handler considers that the skb should be ignored, it should return
368 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
369 * are registered on the exact device (ptype->dev == skb->dev).
370 *
371 * If the rx_handler didn't change skb->dev but wants the skb to be delivered
372 * normally, it should return RX_HANDLER_PASS.
373 *
374 * A device without a registered rx_handler will behave as if rx_handler
375 * returned RX_HANDLER_PASS.
376 */
377
378 enum rx_handler_result {
379 RX_HANDLER_CONSUMED,
380 RX_HANDLER_ANOTHER,
381 RX_HANDLER_EXACT,
382 RX_HANDLER_PASS,
383 };
384 typedef enum rx_handler_result rx_handler_result_t;
385 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
386
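A hedged sketch of the contract described above (the handler below is illustrative; netdev_rx_handler_register() is the registration helper the comment refers to):

/* Hypothetical rx_handler that only inspects the skb and lets delivery proceed. */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	/* ... inspect skb, possibly replace *pskb ... */
	(void)skb;
	return RX_HANDLER_PASS;			/* deliver as if no handler was installed */
}

/* Registration, typically done under rtnl_lock():
 *	err = netdev_rx_handler_register(dev, example_rx_handler, NULL);
 */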
387 void __napi_schedule(struct napi_struct *n);
388
389 static inline bool napi_disable_pending(struct napi_struct *n)
390 {
391 return test_bit(NAPI_STATE_DISABLE, &n->state);
392 }
393
394 /**
395 * napi_schedule_prep - check if napi can be scheduled
396 * @n: napi context
397 *
398 * Test if NAPI routine is already running, and if not mark
399 * it as running. This is used as a condition variable
400 * to ensure only one NAPI poll instance runs. We also make
401 * sure there is no pending NAPI disable.
402 */
403 static inline bool napi_schedule_prep(struct napi_struct *n)
404 {
405 return !napi_disable_pending(n) &&
406 !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
407 }
408
409 /**
410 * napi_schedule - schedule NAPI poll
411 * @n: napi context
412 *
413 * Schedule NAPI poll routine to be called if it is not already
414 * running.
415 */
416 static inline void napi_schedule(struct napi_struct *n)
417 {
418 if (napi_schedule_prep(n))
419 __napi_schedule(n);
420 }
421
422 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
423 static inline bool napi_reschedule(struct napi_struct *napi)
424 {
425 if (napi_schedule_prep(napi)) {
426 __napi_schedule(napi);
427 return true;
428 }
429 return false;
430 }
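A hedged sketch of the usual driver flow around these helpers (the IRQ masking and RX processing helpers are hypothetical placeholders): the interrupt handler masks its RX interrupt and calls napi_schedule(); the poll callback processes at most 'budget' packets and calls napi_complete() only when it finishes early.

/* Hypothetical helpers standing in for driver-specific IRQ and RX handling. */
static int example_process_rx(struct net_device *dev, int budget);
static void example_enable_rx_irq(struct net_device *dev);

/* Hypothetical NAPI poll callback (napi_complete() is declared just below). */
static int example_poll(struct napi_struct *napi, int budget)
{
	int done = example_process_rx(napi->dev, budget);

	if (done < budget) {
		napi_complete(napi);		/* no more work for now */
		example_enable_rx_irq(napi->dev);
	}
	return done;
}

/* In the (hypothetical) interrupt handler:
 *	example_disable_rx_irq(dev);
 *	napi_schedule(&priv->napi);
 */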
431
432 /**
433 * napi_complete - NAPI processing complete
434 * @n: napi context
435 *
436 * Mark NAPI processing as complete.
437 */
438 void __napi_complete(struct napi_struct *n);
439 void napi_complete(struct napi_struct *n);
440
441 /**
442 * napi_by_id - lookup a NAPI by napi_id
443 * @napi_id: hashed napi_id
444 *
445 * lookup @napi_id in napi_hash table
446 * must be called under rcu_read_lock()
447 */
448 struct napi_struct *napi_by_id(unsigned int napi_id);
449
450 /**
451 * napi_hash_add - add a NAPI to global hashtable
452 * @napi: napi context
453 *
454 * generate a new napi_id and store a @napi under it in napi_hash
455 */
456 void napi_hash_add(struct napi_struct *napi);
457
458 /**
459 * napi_hash_del - remove a NAPI from global table
460 * @napi: napi context
461 *
462 * Warning: caller must observe rcu grace period
463 * before freeing memory containing @napi
464 */
465 void napi_hash_del(struct napi_struct *napi);
466
467 /**
468 * napi_disable - prevent NAPI from scheduling
469 * @n: napi context
470 *
471 * Stop NAPI from being scheduled on this context.
472 * Waits till any outstanding processing completes.
473 */
474 static inline void napi_disable(struct napi_struct *n)
475 {
476 might_sleep();
477 set_bit(NAPI_STATE_DISABLE, &n->state);
478 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
479 msleep(1);
480 clear_bit(NAPI_STATE_DISABLE, &n->state);
481 }
482
483 /**
484 * napi_enable - enable NAPI scheduling
485 * @n: napi context
486 *
487 * Resume NAPI from being scheduled on this context.
488 * Must be paired with napi_disable.
489 */
490 static inline void napi_enable(struct napi_struct *n)
491 {
492 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
493 smp_mb__before_atomic();
494 clear_bit(NAPI_STATE_SCHED, &n->state);
495 }
496
497 #ifdef CONFIG_SMP
498 /**
499 * napi_synchronize - wait until NAPI is not running
500 * @n: napi context
501 *
502 * Wait until NAPI is done being scheduled on this context.
503 * Waits till any outstanding processing completes but
504 * does not disable future activations.
505 */
506 static inline void napi_synchronize(const struct napi_struct *n)
507 {
508 while (test_bit(NAPI_STATE_SCHED, &n->state))
509 msleep(1);
510 }
511 #else
512 # define napi_synchronize(n) barrier()
513 #endif
514
515 enum netdev_queue_state_t {
516 __QUEUE_STATE_DRV_XOFF,
517 __QUEUE_STATE_STACK_XOFF,
518 __QUEUE_STATE_FROZEN,
519 };
520
521 #define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF)
522 #define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF)
523 #define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN)
524
525 #define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
526 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
527 QUEUE_STATE_FROZEN)
528 #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
529 QUEUE_STATE_FROZEN)
530
531 /*
532 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
533 * netif_tx_* functions below are used to manipulate this flag. The
534 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
535 * queue independently. The netif_xmit_*stopped functions below are called
536 * to check if the queue has been stopped by the driver or stack (either
537 * of the XOFF bits are set in the state). Drivers should not need to call
538 * netif_xmit*stopped functions, they should only be using netif_tx_*.
539 */
540
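A hedged illustration of the driver-side usage the comment above prescribes (ring-accounting helpers are hypothetical placeholders): stop the queue with netif_tx_stop_queue() when the TX ring fills and wake it with netif_tx_wake_queue() from the completion path; both operate on the state bits defined above.

/* Hypothetical ring-accounting helpers. */
static bool example_ring_full(struct net_device *dev);
static bool example_ring_has_room(struct net_device *dev);

/* Hypothetical TX back-pressure; struct netdev_queue is defined just below, and the
 * netif_tx_* helpers are declared later in this header. */
static void example_tx_backpressure(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (example_ring_full(dev))
		netif_tx_stop_queue(txq);	/* sets __QUEUE_STATE_DRV_XOFF */
	else if (example_ring_has_room(dev))
		netif_tx_wake_queue(txq);	/* clears it and restarts transmission */
}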
541 struct netdev_queue {
542 /*
543 * read mostly part
544 */
545 struct net_device *dev;
546 struct Qdisc *qdisc;
547 struct Qdisc *qdisc_sleeping;
548 #ifdef CONFIG_SYSFS
549 struct kobject kobj;
550 #endif
551 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
552 int numa_node;
553 #endif
554 /*
555 * write mostly part
556 */
557 spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
558 int xmit_lock_owner;
559 /*
560 * please use this field instead of dev->trans_start
561 */
562 unsigned long trans_start;
563
564 /*
565 * Number of TX timeouts for this queue
566 * (/sys/class/net/DEV/Q/trans_timeout)
567 */
568 unsigned long trans_timeout;
569
570 unsigned long state;
571
572 #ifdef CONFIG_BQL
573 struct dql dql;
574 #endif
575 } ____cacheline_aligned_in_smp;
576
577 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
578 {
579 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
580 return q->numa_node;
581 #else
582 return NUMA_NO_NODE;
583 #endif
584 }
585
586 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
587 {
588 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
589 q->numa_node = node;
590 #endif
591 }
592
593 #ifdef CONFIG_RPS
594 /*
595 * This structure holds an RPS map which can be of variable length. The
596 * map is an array of CPUs.
597 */
598 struct rps_map {
599 unsigned int len;
600 struct rcu_head rcu;
601 u16 cpus[0];
602 };
603 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
604
605 /*
606 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
607 * tail pointer for that CPU's input queue at the time of last enqueue, and
608 * a hardware filter index.
609 */
610 struct rps_dev_flow {
611 u16 cpu;
612 u16 filter;
613 unsigned int last_qtail;
614 };
615 #define RPS_NO_FILTER 0xffff
616
617 /*
618 * The rps_dev_flow_table structure contains a table of flow mappings.
619 */
620 struct rps_dev_flow_table {
621 unsigned int mask;
622 struct rcu_head rcu;
623 struct rps_dev_flow flows[0];
624 };
625 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
626 ((_num) * sizeof(struct rps_dev_flow)))
627
628 /*
629 * The rps_sock_flow_table contains mappings of flows to the last CPU
630 * on which they were processed by the application (set in recvmsg).
631 */
632 struct rps_sock_flow_table {
633 unsigned int mask;
634 u16 ents[0];
635 };
636 #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
637 ((_num) * sizeof(u16)))
638
639 #define RPS_NO_CPU 0xffff
640
641 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
642 u32 hash)
643 {
644 if (table && hash) {
645 unsigned int cpu, index = hash & table->mask;
646
647 /* We only give a hint, preemption can change cpu under us */
648 cpu = raw_smp_processor_id();
649
650 if (table->ents[index] != cpu)
651 table->ents[index] = cpu;
652 }
653 }
654
655 static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
656 u32 hash)
657 {
658 if (table && hash)
659 table->ents[hash & table->mask] = RPS_NO_CPU;
660 }
661
662 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
663
664 #ifdef CONFIG_RFS_ACCEL
665 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
666 u16 filter_id);
667 #endif
668 #endif /* CONFIG_RPS */
669
670 /* This structure contains an instance of an RX queue. */
671 struct netdev_rx_queue {
672 #ifdef CONFIG_RPS
673 struct rps_map __rcu *rps_map;
674 struct rps_dev_flow_table __rcu *rps_flow_table;
675 #endif
676 struct kobject kobj;
677 struct net_device *dev;
678 } ____cacheline_aligned_in_smp;
679
680 /*
681 * RX queue sysfs structures and functions.
682 */
683 struct rx_queue_attribute {
684 struct attribute attr;
685 ssize_t (*show)(struct netdev_rx_queue *queue,
686 struct rx_queue_attribute *attr, char *buf);
687 ssize_t (*store)(struct netdev_rx_queue *queue,
688 struct rx_queue_attribute *attr, const char *buf, size_t len);
689 };
690
691 #ifdef CONFIG_XPS
692 /*
693 * This structure holds an XPS map which can be of variable length. The
694 * map is an array of queues.
695 */
696 struct xps_map {
697 unsigned int len;
698 unsigned int alloc_len;
699 struct rcu_head rcu;
700 u16 queues[0];
701 };
702 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
703 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
704 / sizeof(u16))
705
706 /*
707 * This structure holds all XPS maps for device. Maps are indexed by CPU.
708 */
709 struct xps_dev_maps {
710 struct rcu_head rcu;
711 struct xps_map __rcu *cpu_map[0];
712 };
713 #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
714 (nr_cpu_ids * sizeof(struct xps_map *)))
715 #endif /* CONFIG_XPS */
716
717 #define TC_MAX_QUEUE 16
718 #define TC_BITMASK 15
719 /* HW offloaded queuing disciplines txq count and offset maps */
720 struct netdev_tc_txq {
721 u16 count;
722 u16 offset;
723 };
724
725 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
726 /*
727 * This structure is to hold information about the device
728 * configured to run FCoE protocol stack.
729 */
730 struct netdev_fcoe_hbainfo {
731 char manufacturer[64];
732 char serial_number[64];
733 char hardware_version[64];
734 char driver_version[64];
735 char optionrom_version[64];
736 char firmware_version[64];
737 char model[256];
738 char model_description[256];
739 };
740 #endif
741
742 #define MAX_PHYS_PORT_ID_LEN 32
743
744 /* This structure holds a unique identifier to identify the
745 * physical port used by a netdevice.
746 */
747 struct netdev_phys_port_id {
748 unsigned char id[MAX_PHYS_PORT_ID_LEN];
749 unsigned char id_len;
750 };
751
752 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
753 struct sk_buff *skb);
754
755 /*
756 * This structure defines the management hooks for network devices.
757 * The following hooks can be defined; unless noted otherwise, they are
758 * optional and can be filled with a null pointer.
759 *
760 * int (*ndo_init)(struct net_device *dev);
761 * This function is called once when network device is registered.
762 * The network device can use this for any late stage initialization
763 * or semantic validation. It can fail with an error code which will
764 * be propagated back to register_netdev
765 *
766 * void (*ndo_uninit)(struct net_device *dev);
767 * This function is called when device is unregistered or when registration
768 * fails. It is not called if init fails.
769 *
770 * int (*ndo_open)(struct net_device *dev);
771 * This function is called when network device transitions to the up
772 * state.
773 *
774 * int (*ndo_stop)(struct net_device *dev);
775 * This function is called when network device transitions to the down
776 * state.
777 *
778 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
779 * struct net_device *dev);
780 * Called when a packet needs to be transmitted.
781 * Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
782 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
783 * Required; cannot be NULL.
784 *
785 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
786 * void *accel_priv, select_queue_fallback_t fallback);
787 * Called to decide which queue to use when the device supports multiple
788 * transmit queues.
789 *
790 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
791 * This function is called to allow device receiver to make
792 * changes to configuration when multicast or promiscuous mode is enabled.
793 *
794 * void (*ndo_set_rx_mode)(struct net_device *dev);
795 * This function is called when the device changes its address list filtering.
796 * If driver handles unicast address filtering, it should set
797 * IFF_UNICAST_FLT to its priv_flags.
798 *
799 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
800 * This function is called when the Media Access Control address
801 * needs to be changed. If this interface is not defined, the
802 * mac address can not be changed.
803 *
804 * int (*ndo_validate_addr)(struct net_device *dev);
805 * Test if Media Access Control address is valid for the device.
806 *
807 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
808 * Called when a user requests an ioctl which can't be handled by
809 * the generic interface code. If not defined, ioctls return a
810 * not-supported error code.
811 *
812 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
813 * Used to set network devices bus interface parameters. This interface
814 * is retained for legacy reason, new devices should use the bus
815 * interface (PCI) for low level management.
816 *
817 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
818 * Called when a user wants to change the Maximum Transfer Unit
819 * of a device. If not defined, any request to change the MTU
820 * will return an error.
821 *
822 * void (*ndo_tx_timeout)(struct net_device *dev);
823 * Callback used when the transmitter has not made any progress
824 * for dev->watchdog ticks.
825 *
826 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
827 * struct rtnl_link_stats64 *storage);
828 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
829 * Called when a user wants to get the network device usage
830 * statistics. Drivers must do one of the following:
831 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
832 * rtnl_link_stats64 structure passed by the caller.
833 * 2. Define @ndo_get_stats to update a net_device_stats structure
834 * (which should normally be dev->stats) and return a pointer to
835 * it. The structure may be changed asynchronously only if each
836 * field is written atomically.
837 * 3. Update dev->stats asynchronously and atomically, and define
838 * neither operation.
839 *
840 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
841 * If the device supports VLAN filtering, this function is called when a
842 * VLAN id is registered.
843 *
844 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
845 * If the device supports VLAN filtering, this function is called when a
846 * VLAN id is unregistered.
847 *
848 * void (*ndo_poll_controller)(struct net_device *dev);
849 *
850 * SR-IOV management functions.
851 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
852 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
853 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
854 * int max_tx_rate);
855 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
856 * int (*ndo_get_vf_config)(struct net_device *dev,
857 * int vf, struct ifla_vf_info *ivf);
858 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
859 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
860 * struct nlattr *port[]);
861 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
862 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
863 * Called to setup 'tc' number of traffic classes in the net device. This
864 * is always called from the stack with the rtnl lock held and netif tx
865 * queues stopped. This allows the netdevice to perform queue management
866 * safely.
867 *
868 * Fiber Channel over Ethernet (FCoE) offload functions.
869 * int (*ndo_fcoe_enable)(struct net_device *dev);
870 * Called when the FCoE protocol stack wants to start using LLD for FCoE
871 * so the underlying device can perform whatever needed configuration or
872 * initialization to support acceleration of FCoE traffic.
873 *
874 * int (*ndo_fcoe_disable)(struct net_device *dev);
875 * Called when the FCoE protocol stack wants to stop using LLD for FCoE
876 * so the underlying device can perform whatever needed clean-ups to
877 * stop supporting acceleration of FCoE traffic.
878 *
879 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
880 * struct scatterlist *sgl, unsigned int sgc);
881 * Called when the FCoE Initiator wants to initialize an I/O that
882 * is a possible candidate for Direct Data Placement (DDP). The LLD can
883 * perform necessary setup and returns 1 to indicate the device is set up
884 * successfully to perform DDP on this I/O, otherwise this returns 0.
885 *
886 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
887 * Called when the FCoE Initiator/Target is done with the DDPed I/O as
888 * indicated by the FC exchange id 'xid', so the underlying device can
889 * clean up and reuse resources for later DDP requests.
890 *
891 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
892 * struct scatterlist *sgl, unsigned int sgc);
893 * Called when the FCoE Target wants to initialize an I/O that
894 * is a possible candidate for Direct Data Placement (DDP). The LLD can
895 * perform necessary setup and returns 1 to indicate the device is set up
896 * successfully to perform DDP on this I/O, otherwise this returns 0.
897 *
898 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
899 * struct netdev_fcoe_hbainfo *hbainfo);
900 * Called when the FCoE Protocol stack wants information on the underlying
901 * device. This information is utilized by the FCoE protocol stack to
902 * register attributes with Fiber Channel management service as per the
903 * FC-GS Fabric Device Management Information(FDMI) specification.
904 *
905 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
906 * Called when the underlying device wants to override default World Wide
907 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
908 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
909 * protocol stack to use.
910 *
911 * RFS acceleration.
912 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
913 * u16 rxq_index, u32 flow_id);
914 * Set hardware filter for RFS. rxq_index is the target queue index;
915 * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
916 * Return the filter ID on success, or a negative error code.
917 *
918 * Slave management functions (for bridge, bonding, etc).
919 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
920 * Called to make another netdev an underling.
921 *
922 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
923 * Called to release previously enslaved netdev.
924 *
925 * Feature/offload setting functions.
926 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
927 * netdev_features_t features);
928 * Adjusts the requested feature flags according to device-specific
929 * constraints, and returns the resulting flags. Must not modify
930 * the device state.
931 *
932 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
933 * Called to update device configuration to new features. Passed
934 * feature set might be less than what was returned by ndo_fix_features()).
935 * Must return >0 or -errno if it changed dev->features itself.
936 *
937 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
938 * struct net_device *dev,
939 * const unsigned char *addr, u16 flags)
940 * Adds an FDB entry to dev for addr.
941 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
942 * struct net_device *dev,
943 * const unsigned char *addr)
944 * Deletes the FDB entry from dev corresponding to addr.
945 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
946 * struct net_device *dev, int idx)
947 * Used to add FDB entries to dump requests. Implementers should add
948 * entries to skb and update idx with the number of entries.
949 *
950 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
951 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
952 * struct net_device *dev, u32 filter_mask)
953 *
954 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
955 * Called to change device carrier. Soft-devices (like dummy, team, etc)
956 * which do not represent real hardware may define this to allow their
957 * userspace components to manage their virtual carrier state. Devices
958 * that determine carrier state from physical hardware properties (eg
959 * network cables) or protocol-dependent mechanisms (eg
960 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
961 *
962 * int (*ndo_get_phys_port_id)(struct net_device *dev,
963 * struct netdev_phys_port_id *ppid);
964 * Called to get ID of physical port of this device. If driver does
965 * not implement this, it is assumed that the hw is not able to have
966 * multiple net devices on single physical port.
967 *
968 * void (*ndo_add_vxlan_port)(struct net_device *dev,
969 * sa_family_t sa_family, __be16 port);
970 * Called by vxlan to notify a driver about the UDP port and socket
971 * address family that vxlan is listening to. It is called only when
972 * a new port starts listening. The operation is protected by the
973 * vxlan_net->sock_lock.
974 *
975 * void (*ndo_del_vxlan_port)(struct net_device *dev,
976 * sa_family_t sa_family, __be16 port);
977 * Called by vxlan to notify the driver about a UDP port and socket
978 * address family that vxlan is not listening to anymore. The operation
979 * is protected by the vxlan_net->sock_lock.
980 *
981 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
982 * struct net_device *dev)
983 * Called by upper layer devices to accelerate switching or other
984 * station functionality into hardware. 'pdev' is the lowerdev
985 * to use for the offload and 'dev' is the net device that will
986 * back the offload. Returns a pointer to the private structure
987 * the upper layer will maintain.
988 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
989 * Called by upper layer device to delete the station created
990 * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
991 * the station and priv is the structure returned by the add
992 * operation.
993 * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
994 * struct net_device *dev,
995 * void *priv);
996 * Callback to use for xmit over the accelerated station. This
997 * is used in place of ndo_start_xmit on accelerated net
998 * devices.
999 */
1000 struct net_device_ops {
1001 int (*ndo_init)(struct net_device *dev);
1002 void (*ndo_uninit)(struct net_device *dev);
1003 int (*ndo_open)(struct net_device *dev);
1004 int (*ndo_stop)(struct net_device *dev);
1005 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
1006 struct net_device *dev);
1007 u16 (*ndo_select_queue)(struct net_device *dev,
1008 struct sk_buff *skb,
1009 void *accel_priv,
1010 select_queue_fallback_t fallback);
1011 void (*ndo_change_rx_flags)(struct net_device *dev,
1012 int flags);
1013 void (*ndo_set_rx_mode)(struct net_device *dev);
1014 int (*ndo_set_mac_address)(struct net_device *dev,
1015 void *addr);
1016 int (*ndo_validate_addr)(struct net_device *dev);
1017 int (*ndo_do_ioctl)(struct net_device *dev,
1018 struct ifreq *ifr, int cmd);
1019 int (*ndo_set_config)(struct net_device *dev,
1020 struct ifmap *map);
1021 int (*ndo_change_mtu)(struct net_device *dev,
1022 int new_mtu);
1023 int (*ndo_neigh_setup)(struct net_device *dev,
1024 struct neigh_parms *);
1025 void (*ndo_tx_timeout) (struct net_device *dev);
1026
1027 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
1028 struct rtnl_link_stats64 *storage);
1029 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1030
1031 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
1032 __be16 proto, u16 vid);
1033 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
1034 __be16 proto, u16 vid);
1035 #ifdef CONFIG_NET_POLL_CONTROLLER
1036 void (*ndo_poll_controller)(struct net_device *dev);
1037 int (*ndo_netpoll_setup)(struct net_device *dev,
1038 struct netpoll_info *info);
1039 void (*ndo_netpoll_cleanup)(struct net_device *dev);
1040 #endif
1041 #ifdef CONFIG_NET_RX_BUSY_POLL
1042 int (*ndo_busy_poll)(struct napi_struct *dev);
1043 #endif
1044 int (*ndo_set_vf_mac)(struct net_device *dev,
1045 int queue, u8 *mac);
1046 int (*ndo_set_vf_vlan)(struct net_device *dev,
1047 int queue, u16 vlan, u8 qos);
1048 int (*ndo_set_vf_rate)(struct net_device *dev,
1049 int vf, int min_tx_rate,
1050 int max_tx_rate);
1051 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1052 int vf, bool setting);
1053 int (*ndo_get_vf_config)(struct net_device *dev,
1054 int vf,
1055 struct ifla_vf_info *ivf);
1056 int (*ndo_set_vf_link_state)(struct net_device *dev,
1057 int vf, int link_state);
1058 int (*ndo_set_vf_port)(struct net_device *dev,
1059 int vf,
1060 struct nlattr *port[]);
1061 int (*ndo_get_vf_port)(struct net_device *dev,
1062 int vf, struct sk_buff *skb);
1063 int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
1064 #if IS_ENABLED(CONFIG_FCOE)
1065 int (*ndo_fcoe_enable)(struct net_device *dev);
1066 int (*ndo_fcoe_disable)(struct net_device *dev);
1067 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1068 u16 xid,
1069 struct scatterlist *sgl,
1070 unsigned int sgc);
1071 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1072 u16 xid);
1073 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1074 u16 xid,
1075 struct scatterlist *sgl,
1076 unsigned int sgc);
1077 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1078 struct netdev_fcoe_hbainfo *hbainfo);
1079 #endif
1080
1081 #if IS_ENABLED(CONFIG_LIBFCOE)
1082 #define NETDEV_FCOE_WWNN 0
1083 #define NETDEV_FCOE_WWPN 1
1084 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1085 u64 *wwn, int type);
1086 #endif
1087
1088 #ifdef CONFIG_RFS_ACCEL
1089 int (*ndo_rx_flow_steer)(struct net_device *dev,
1090 const struct sk_buff *skb,
1091 u16 rxq_index,
1092 u32 flow_id);
1093 #endif
1094 int (*ndo_add_slave)(struct net_device *dev,
1095 struct net_device *slave_dev);
1096 int (*ndo_del_slave)(struct net_device *dev,
1097 struct net_device *slave_dev);
1098 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1099 netdev_features_t features);
1100 int (*ndo_set_features)(struct net_device *dev,
1101 netdev_features_t features);
1102 int (*ndo_neigh_construct)(struct neighbour *n);
1103 void (*ndo_neigh_destroy)(struct neighbour *n);
1104
1105 int (*ndo_fdb_add)(struct ndmsg *ndm,
1106 struct nlattr *tb[],
1107 struct net_device *dev,
1108 const unsigned char *addr,
1109 u16 flags);
1110 int (*ndo_fdb_del)(struct ndmsg *ndm,
1111 struct nlattr *tb[],
1112 struct net_device *dev,
1113 const unsigned char *addr);
1114 int (*ndo_fdb_dump)(struct sk_buff *skb,
1115 struct netlink_callback *cb,
1116 struct net_device *dev,
1117 int idx);
1118
1119 int (*ndo_bridge_setlink)(struct net_device *dev,
1120 struct nlmsghdr *nlh);
1121 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1122 u32 pid, u32 seq,
1123 struct net_device *dev,
1124 u32 filter_mask);
1125 int (*ndo_bridge_dellink)(struct net_device *dev,
1126 struct nlmsghdr *nlh);
1127 int (*ndo_change_carrier)(struct net_device *dev,
1128 bool new_carrier);
1129 int (*ndo_get_phys_port_id)(struct net_device *dev,
1130 struct netdev_phys_port_id *ppid);
1131 void (*ndo_add_vxlan_port)(struct net_device *dev,
1132 sa_family_t sa_family,
1133 __be16 port);
1134 void (*ndo_del_vxlan_port)(struct net_device *dev,
1135 sa_family_t sa_family,
1136 __be16 port);
1137
1138 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1139 struct net_device *dev);
1140 void (*ndo_dfwd_del_station)(struct net_device *pdev,
1141 void *priv);
1142
1143 netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
1144 struct net_device *dev,
1145 void *priv);
1146 int (*ndo_get_lock_subclass)(struct net_device *dev);
1147 };
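
A minimal sketch of how a driver typically wires up these callbacks; the foo_* names are hypothetical and only the ops structure and the generic eth_* helpers come from the kernel headers:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Hypothetical driver callbacks; real bodies elided for brevity. */
static int foo_open(struct net_device *dev)  { return 0; }
static int foo_stop(struct net_device *dev)  { return 0; }

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb_any(skb);		/* a real driver would post skb to hardware */
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,
	.ndo_stop		= foo_stop,
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,		/* generic helpers from etherdevice.h */
	.ndo_validate_addr	= eth_validate_addr,
};

/* dev->netdev_ops = &foo_netdev_ops; is assigned once, before register_netdev(). */
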
1148
1149 /**
1150 * enum net_device_priv_flags - &struct net_device priv_flags
1151 *
1152 * These are the &struct net_device priv_flags; they are only set internally
1153 * by drivers and used in the kernel. These flags are invisible to
1154 * userspace, which means that the order of these flags can change
1155 * during any kernel release.
1156 *
1157 * You should have a pretty good reason to be extending these flags.
1158 *
1159 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1160 * @IFF_EBRIDGE: Ethernet bridging device
1161 * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
1162 * @IFF_MASTER_8023AD: bonding master, 802.3ad
1163 * @IFF_MASTER_ALB: bonding master, balance-alb
1164 * @IFF_BONDING: bonding master or slave
1165 * @IFF_SLAVE_NEEDARP: need ARPs for validation
1166 * @IFF_ISATAP: ISATAP interface (RFC4214)
1167 * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
1168 * @IFF_WAN_HDLC: WAN HDLC device
1169 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1170 * release skb->dst
1171 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1172 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1173 * @IFF_MACVLAN_PORT: device used as macvlan port
1174 * @IFF_BRIDGE_PORT: device used as bridge port
1175 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1176 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1177 * @IFF_UNICAST_FLT: Supports unicast filtering
1178 * @IFF_TEAM_PORT: device used as team port
1179 * @IFF_SUPP_NOFCS: device supports sending custom FCS
1180 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1181 * change when it's running
1182 * @IFF_MACVLAN: Macvlan device
1183 */
1184 enum netdev_priv_flags {
1185 IFF_802_1Q_VLAN = 1<<0,
1186 IFF_EBRIDGE = 1<<1,
1187 IFF_SLAVE_INACTIVE = 1<<2,
1188 IFF_MASTER_8023AD = 1<<3,
1189 IFF_MASTER_ALB = 1<<4,
1190 IFF_BONDING = 1<<5,
1191 IFF_SLAVE_NEEDARP = 1<<6,
1192 IFF_ISATAP = 1<<7,
1193 IFF_MASTER_ARPMON = 1<<8,
1194 IFF_WAN_HDLC = 1<<9,
1195 IFF_XMIT_DST_RELEASE = 1<<10,
1196 IFF_DONT_BRIDGE = 1<<11,
1197 IFF_DISABLE_NETPOLL = 1<<12,
1198 IFF_MACVLAN_PORT = 1<<13,
1199 IFF_BRIDGE_PORT = 1<<14,
1200 IFF_OVS_DATAPATH = 1<<15,
1201 IFF_TX_SKB_SHARING = 1<<16,
1202 IFF_UNICAST_FLT = 1<<17,
1203 IFF_TEAM_PORT = 1<<18,
1204 IFF_SUPP_NOFCS = 1<<19,
1205 IFF_LIVE_ADDR_CHANGE = 1<<20,
1206 IFF_MACVLAN = 1<<21,
1207 };
1208
1209 #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1210 #define IFF_EBRIDGE IFF_EBRIDGE
1211 #define IFF_SLAVE_INACTIVE IFF_SLAVE_INACTIVE
1212 #define IFF_MASTER_8023AD IFF_MASTER_8023AD
1213 #define IFF_MASTER_ALB IFF_MASTER_ALB
1214 #define IFF_BONDING IFF_BONDING
1215 #define IFF_SLAVE_NEEDARP IFF_SLAVE_NEEDARP
1216 #define IFF_ISATAP IFF_ISATAP
1217 #define IFF_MASTER_ARPMON IFF_MASTER_ARPMON
1218 #define IFF_WAN_HDLC IFF_WAN_HDLC
1219 #define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1220 #define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
1221 #define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
1222 #define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
1223 #define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
1224 #define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
1225 #define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
1226 #define IFF_UNICAST_FLT IFF_UNICAST_FLT
1227 #define IFF_TEAM_PORT IFF_TEAM_PORT
1228 #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1229 #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1230 #define IFF_MACVLAN IFF_MACVLAN
1231
1232 /*
1233 * The DEVICE structure.
1234 * Actually, this whole structure is a big mistake. It mixes I/O
1235 * data with strictly "high-level" data, and it has to know about
1236 * almost every data structure used in the INET module.
1237 *
1238 * FIXME: cleanup struct net_device such that network protocol info
1239 * moves out.
1240 */
1241
1242 struct net_device {
1243
1244 /*
1245 * This is the first field of the "visible" part of this structure
1246 * (i.e. as seen by users in the "Space.c" file). It is the name
1247 * of the interface.
1248 */
1249 char name[IFNAMSIZ];
1250
1251 /* device name hash chain, please keep it close to name[] */
1252 struct hlist_node name_hlist;
1253
1254 /* snmp alias */
1255 char *ifalias;
1256
1257 /*
1258 * I/O specific fields
1259 * FIXME: Merge these and struct ifmap into one
1260 */
1261 unsigned long mem_end; /* shared mem end */
1262 unsigned long mem_start; /* shared mem start */
1263 unsigned long base_addr; /* device I/O address */
1264 int irq; /* device IRQ number */
1265
1266 /*
1267 * Some hardware also needs these fields, but they are not
1268 * part of the usual set specified in Space.c.
1269 */
1270
1271 unsigned long state;
1272
1273 struct list_head dev_list;
1274 struct list_head napi_list;
1275 struct list_head unreg_list;
1276 struct list_head close_list;
1277
1278 /* directly linked devices, like slaves for bonding */
1279 struct {
1280 struct list_head upper;
1281 struct list_head lower;
1282 } adj_list;
1283
1284 /* all linked devices, *including* neighbours */
1285 struct {
1286 struct list_head upper;
1287 struct list_head lower;
1288 } all_adj_list;
1289
1290
1291 /* currently active device features */
1292 netdev_features_t features;
1293 /* user-changeable features */
1294 netdev_features_t hw_features;
1295 /* user-requested features */
1296 netdev_features_t wanted_features;
1297 /* mask of features inheritable by VLAN devices */
1298 netdev_features_t vlan_features;
1299 /* mask of features inherited by encapsulating devices
1300 * This field indicates what encapsulation offloads
1301 * the hardware is capable of doing, and drivers will
1302 * need to set them appropriately.
1303 */
1304 netdev_features_t hw_enc_features;
1305 /* mask of features inheritable by MPLS */
1306 netdev_features_t mpls_features;
1307
1308 /* Interface index. Unique device identifier */
1309 int ifindex;
1310 int iflink;
1311
1312 struct net_device_stats stats;
1313
1314 /* dropped packets by core network; do not use this in drivers */
1315 atomic_long_t rx_dropped;
1316 atomic_long_t tx_dropped;
1317
1318 /* Stats to monitor carrier on<->off transitions */
1319 atomic_t carrier_changes;
1320
1321 #ifdef CONFIG_WIRELESS_EXT
1322 /* List of functions to handle Wireless Extensions (instead of ioctl).
1323 * See <net/iw_handler.h> for details. Jean II */
1324 const struct iw_handler_def * wireless_handlers;
1325 /* Instance data managed by the core of Wireless Extensions. */
1326 struct iw_public_data * wireless_data;
1327 #endif
1328 /* Management operations */
1329 const struct net_device_ops *netdev_ops;
1330 const struct ethtool_ops *ethtool_ops;
1331 const struct forwarding_accel_ops *fwd_ops;
1332
1333 /* Hardware header description */
1334 const struct header_ops *header_ops;
1335
1336 unsigned int flags; /* interface flags (a la BSD) */
1337 unsigned int priv_flags; /* Like 'flags' but invisible to userspace.
1338 * See if.h for definitions. */
1339 unsigned short gflags;
1340 unsigned short padded; /* How much padding added by alloc_netdev() */
1341
1342 unsigned char operstate; /* RFC2863 operstate */
1343 unsigned char link_mode; /* mapping policy to operstate */
1344
1345 unsigned char if_port; /* Selectable AUI, TP,..*/
1346 unsigned char dma; /* DMA channel */
1347
1348 unsigned int mtu; /* interface MTU value */
1349 unsigned short type; /* interface hardware type */
1350 unsigned short hard_header_len; /* hardware hdr length */
1351
1352 /* extra head- and tailroom the hardware may need, but not in all cases
1353 * can this be guaranteed, especially tailroom. Some cases also use
1354 * LL_MAX_HEADER instead to allocate the skb.
1355 */
1356 unsigned short needed_headroom;
1357 unsigned short needed_tailroom;
1358
1359 /* Interface address info. */
1360 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
1361 unsigned char addr_assign_type; /* hw address assignment type */
1362 unsigned char addr_len; /* hardware address length */
1363 unsigned short neigh_priv_len;
1364 unsigned short dev_id; /* Used to differentiate devices
1365 * that share the same link
1366 * layer address
1367 */
1368 unsigned short dev_port; /* Used to differentiate
1369 * devices that share the same
1370 * function
1371 */
1372 spinlock_t addr_list_lock;
1373 struct netdev_hw_addr_list uc; /* Unicast mac addresses */
1374 struct netdev_hw_addr_list mc; /* Multicast mac addresses */
1375 struct netdev_hw_addr_list dev_addrs; /* list of device
1376 * hw addresses
1377 */
1378 #ifdef CONFIG_SYSFS
1379 struct kset *queues_kset;
1380 #endif
1381
1382 bool uc_promisc;
1383 unsigned int promiscuity;
1384 unsigned int allmulti;
1385
1386
1387 /* Protocol specific pointers */
1388
1389 #if IS_ENABLED(CONFIG_VLAN_8021Q)
1390 struct vlan_info __rcu *vlan_info; /* VLAN info */
1391 #endif
1392 #if IS_ENABLED(CONFIG_NET_DSA)
1393 struct dsa_switch_tree *dsa_ptr; /* dsa specific data */
1394 #endif
1395 #if IS_ENABLED(CONFIG_TIPC)
1396 struct tipc_bearer __rcu *tipc_ptr; /* TIPC specific data */
1397 #endif
1398 void *atalk_ptr; /* AppleTalk link */
1399 struct in_device __rcu *ip_ptr; /* IPv4 specific data */
1400 struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
1401 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
1402 void *ax25_ptr; /* AX.25 specific data */
1403 struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
1404 assign before registering */
1405
1406 /*
1407 * Cache lines mostly used on receive path (including eth_type_trans())
1408 */
1409 unsigned long last_rx; /* Time of last Rx */
1410
1411 /* Interface address info used in eth_type_trans() */
1412 unsigned char *dev_addr; /* hw address, (before bcast
1413 because most packets are
1414 unicast) */
1415
1416
1417 #ifdef CONFIG_SYSFS
1418 struct netdev_rx_queue *_rx;
1419
1420 /* Number of RX queues allocated at register_netdev() time */
1421 unsigned int num_rx_queues;
1422
1423 /* Number of RX queues currently active in device */
1424 unsigned int real_num_rx_queues;
1425
1426 #endif
1427
1428 rx_handler_func_t __rcu *rx_handler;
1429 void __rcu *rx_handler_data;
1430
1431 struct netdev_queue __rcu *ingress_queue;
1432 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast addr */
1433
1434
1435 /*
1436 * Cache lines mostly used on transmit path
1437 */
1438 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1439
1440 /* Number of TX queues allocated at alloc_netdev_mq() time */
1441 unsigned int num_tx_queues;
1442
1443 /* Number of TX queues currently active in device */
1444 unsigned int real_num_tx_queues;
1445
1446 /* root qdisc from userspace point of view */
1447 struct Qdisc *qdisc;
1448
1449 unsigned long tx_queue_len; /* Max frames per queue allowed */
1450 spinlock_t tx_global_lock;
1451
1452 #ifdef CONFIG_XPS
1453 struct xps_dev_maps __rcu *xps_maps;
1454 #endif
1455 #ifdef CONFIG_RFS_ACCEL
1456 /* CPU reverse-mapping for RX completion interrupts, indexed
1457 * by RX queue number. Assigned by driver. This must only be
1458 * set if the ndo_rx_flow_steer operation is defined. */
1459 struct cpu_rmap *rx_cpu_rmap;
1460 #endif
1461
1462 /* These may be needed for future network-power-down code. */
1463
1464 /*
1465 * trans_start here is expensive for high speed devices on SMP,
1466 * please use netdev_queue->trans_start instead.
1467 */
1468 unsigned long trans_start; /* Time (in jiffies) of last Tx */
1469
1470 int watchdog_timeo; /* used by dev_watchdog() */
1471 struct timer_list watchdog_timer;
1472
1473 /* Number of references to this device */
1474 int __percpu *pcpu_refcnt;
1475
1476 /* delayed register/unregister */
1477 struct list_head todo_list;
1478 /* device index hash chain */
1479 struct hlist_node index_hlist;
1480
1481 struct list_head link_watch_list;
1482
1483 /* register/unregister state machine */
1484 enum { NETREG_UNINITIALIZED=0,
1485 NETREG_REGISTERED, /* completed register_netdevice */
1486 NETREG_UNREGISTERING, /* called unregister_netdevice */
1487 NETREG_UNREGISTERED, /* completed unregister todo */
1488 NETREG_RELEASED, /* called free_netdev */
1489 NETREG_DUMMY, /* dummy device for NAPI poll */
1490 } reg_state:8;
1491
1492 bool dismantle; /* device is going to be freed */
1493
1494 enum {
1495 RTNL_LINK_INITIALIZED,
1496 RTNL_LINK_INITIALIZING,
1497 } rtnl_link_state:16;
1498
1499 /* Called from unregister, can be used to call free_netdev */
1500 void (*destructor)(struct net_device *dev);
1501
1502 #ifdef CONFIG_NETPOLL
1503 struct netpoll_info __rcu *npinfo;
1504 #endif
1505
1506 #ifdef CONFIG_NET_NS
1507 /* Network namespace this network device is inside */
1508 struct net *nd_net;
1509 #endif
1510
1511 /* mid-layer private */
1512 union {
1513 void *ml_priv;
1514 struct pcpu_lstats __percpu *lstats; /* loopback stats */
1515 struct pcpu_sw_netstats __percpu *tstats;
1516 struct pcpu_dstats __percpu *dstats; /* dummy stats */
1517 struct pcpu_vstats __percpu *vstats; /* veth stats */
1518 };
1519 /* GARP */
1520 struct garp_port __rcu *garp_port;
1521 /* MRP */
1522 struct mrp_port __rcu *mrp_port;
1523
1524 /* class/net/name entry */
1525 struct device dev;
1526 /* space for optional device, statistics, and wireless sysfs groups */
1527 const struct attribute_group *sysfs_groups[4];
1528 /* space for optional per-rx queue attributes */
1529 const struct attribute_group *sysfs_rx_queue_group;
1530
1531 /* rtnetlink link ops */
1532 const struct rtnl_link_ops *rtnl_link_ops;
1533
1534 /* for setting kernel sock attribute on TCP connection setup */
1535 #define GSO_MAX_SIZE 65536
1536 unsigned int gso_max_size;
1537 #define GSO_MAX_SEGS 65535
1538 u16 gso_max_segs;
1539
1540 #ifdef CONFIG_DCB
1541 /* Data Center Bridging netlink ops */
1542 const struct dcbnl_rtnl_ops *dcbnl_ops;
1543 #endif
1544 u8 num_tc;
1545 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1546 u8 prio_tc_map[TC_BITMASK + 1];
1547
1548 #if IS_ENABLED(CONFIG_FCOE)
1549 /* max exchange id for FCoE LRO by ddp */
1550 unsigned int fcoe_ddp_xid;
1551 #endif
1552 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1553 struct netprio_map __rcu *priomap;
1554 #endif
1555 /* phy device may attach itself for hardware timestamping */
1556 struct phy_device *phydev;
1557
1558 struct lock_class_key *qdisc_tx_busylock;
1559
1560 /* group the device belongs to */
1561 int group;
1562
1563 struct pm_qos_request pm_qos_req;
1564 };
1565 #define to_net_dev(d) container_of(d, struct net_device, dev)
1566
1567 #define NETDEV_ALIGN 32
1568
1569 static inline
1570 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1571 {
1572 return dev->prio_tc_map[prio & TC_BITMASK];
1573 }
1574
1575 static inline
1576 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1577 {
1578 if (tc >= dev->num_tc)
1579 return -EINVAL;
1580
1581 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1582 return 0;
1583 }
1584
1585 static inline
1586 void netdev_reset_tc(struct net_device *dev)
1587 {
1588 dev->num_tc = 0;
1589 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
1590 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
1591 }
1592
1593 static inline
1594 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
1595 {
1596 if (tc >= dev->num_tc)
1597 return -EINVAL;
1598
1599 dev->tc_to_txq[tc].count = count;
1600 dev->tc_to_txq[tc].offset = offset;
1601 return 0;
1602 }
1603
1604 static inline
1605 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
1606 {
1607 if (num_tc > TC_MAX_QUEUE)
1608 return -EINVAL;
1609
1610 dev->num_tc = num_tc;
1611 return 0;
1612 }
1613
1614 static inline
1615 int netdev_get_num_tc(struct net_device *dev)
1616 {
1617 return dev->num_tc;
1618 }
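
A sketch of how a multiqueue driver might map traffic classes to TX queue ranges with the helpers above, for example from its ndo_setup_tc() callback; the 4-queues-per-class split is an arbitrary assumption:

#include <linux/netdevice.h>

static int foo_setup_tc(struct net_device *dev, u8 num_tc)
{
	u8 tc;
	int err;

	netdev_reset_tc(dev);
	err = netdev_set_num_tc(dev, num_tc);	/* must precede netdev_set_tc_queue() */
	if (err)
		return err;

	for (tc = 0; tc < num_tc; tc++) {
		/* each class gets 4 consecutive queues starting at tc * 4 */
		err = netdev_set_tc_queue(dev, tc, 4, tc * 4);
		if (err)
			return err;
	}
	return 0;
}
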
1619
1620 static inline
1621 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1622 unsigned int index)
1623 {
1624 return &dev->_tx[index];
1625 }
1626
1627 static inline void netdev_for_each_tx_queue(struct net_device *dev,
1628 void (*f)(struct net_device *,
1629 struct netdev_queue *,
1630 void *),
1631 void *arg)
1632 {
1633 unsigned int i;
1634
1635 for (i = 0; i < dev->num_tx_queues; i++)
1636 f(dev, &dev->_tx[i], arg);
1637 }
1638
1639 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1640 struct sk_buff *skb,
1641 void *accel_priv);
1642
1643 /*
1644 * Net namespace inlines
1645 */
1646 static inline
1647 struct net *dev_net(const struct net_device *dev)
1648 {
1649 return read_pnet(&dev->nd_net);
1650 }
1651
1652 static inline
1653 void dev_net_set(struct net_device *dev, struct net *net)
1654 {
1655 #ifdef CONFIG_NET_NS
1656 release_net(dev->nd_net);
1657 dev->nd_net = hold_net(net);
1658 #endif
1659 }
1660
1661 static inline bool netdev_uses_dsa_tags(struct net_device *dev)
1662 {
1663 #ifdef CONFIG_NET_DSA_TAG_DSA
1664 if (dev->dsa_ptr != NULL)
1665 return dsa_uses_dsa_tags(dev->dsa_ptr);
1666 #endif
1667
1668 return 0;
1669 }
1670
1671 static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1672 {
1673 #ifdef CONFIG_NET_DSA_TAG_TRAILER
1674 if (dev->dsa_ptr != NULL)
1675 return dsa_uses_trailer_tags(dev->dsa_ptr);
1676 #endif
1677
1678 return 0;
1679 }
1680
1681 /**
1682 * netdev_priv - access network device private data
1683 * @dev: network device
1684 *
1685 * Get network device private data
1686 */
1687 static inline void *netdev_priv(const struct net_device *dev)
1688 {
1689 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
1690 }
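
A sketch of the usual allocation pattern behind netdev_priv(): the private area is allocated together with the net_device, so the helper simply returns the memory just past the aligned structure. The foo_priv layout is hypothetical:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_priv {			/* hypothetical per-device state */
	spinlock_t	lock;
	u32		msg_enable;
};

static int foo_probe_sketch(void)
{
	struct net_device *dev;
	struct foo_priv *priv;

	dev = alloc_etherdev(sizeof(struct foo_priv));
	if (!dev)
		return -ENOMEM;

	priv = netdev_priv(dev);	/* points just past the aligned net_device */
	spin_lock_init(&priv->lock);

	free_netdev(dev);		/* a real driver would register_netdev() instead */
	return 0;
}
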
1691
1692 /* Set the sysfs physical device reference for the network logical device;
1693 * if set prior to registration, it will cause a symlink to be created during initialization.
1694 */
1695 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
1696
1697 /* Set the sysfs device type for the network logical device to allow
1698 * fine-grained identification of different network device types. For
1699 * example Ethernet, Wireless LAN, Bluetooth, WiMAX, etc.
1700 */
1701 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
1702
1703 /* Default NAPI poll() weight
1704 * Device drivers are strongly advised not to use a bigger value
1705 */
1706 #define NAPI_POLL_WEIGHT 64
1707
1708 /**
1709 * netif_napi_add - initialize a napi context
1710 * @dev: network device
1711 * @napi: napi context
1712 * @poll: polling function
1713 * @weight: default weight
1714 *
1715 * netif_napi_add() must be used to initialize a napi context prior to calling
1716 * *any* of the other napi related functions.
1717 */
1718 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1719 int (*poll)(struct napi_struct *, int), int weight);
1720
1721 /**
1722 * netif_napi_del - remove a napi context
1723 * @napi: napi context
1724 *
1725 * netif_napi_del() removes a napi context from the network device napi list
1726 */
1727 void netif_napi_del(struct napi_struct *napi);
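
A sketch of the usual NAPI pattern built on these two functions; the foo_priv structure and the elided RX processing are assumptions, the napi helpers are the real ones from this header:

#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;	/* hypothetical per-device NAPI context */
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* work_done = <process up to budget RX packets, calling napi_gro_receive()> */
	if (work_done < budget)
		napi_complete(napi);	/* done for now; re-enable device interrupts here */

	return work_done;
}

static void foo_setup_napi(struct net_device *dev, struct foo_priv *priv)
{
	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);	/* typically done from ndo_open */
}
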
1728
1729 struct napi_gro_cb {
1730 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
1731 void *frag0;
1732
1733 /* Length of frag0. */
1734 unsigned int frag0_len;
1735
1736 /* This indicates where we are processing relative to skb->data. */
1737 int data_offset;
1738
1739 /* This is non-zero if the packet cannot be merged with the new skb. */
1740 u16 flush;
1741
1742 /* Save the IP ID here and check when we get to the transport layer */
1743 u16 flush_id;
1744
1745 /* Number of segments aggregated. */
1746 u16 count;
1747
1748 /* This is non-zero if the packet may be of the same flow. */
1749 u8 same_flow;
1750
1751 /* Free the skb? */
1752 u8 free;
1753 #define NAPI_GRO_FREE 1
1754 #define NAPI_GRO_FREE_STOLEN_HEAD 2
1755
1756 /* jiffies when first packet was created/queued */
1757 unsigned long age;
1758
1759 /* Used in ipv6_gro_receive() */
1760 u16 proto;
1761
1762 /* Used in udp_gro_receive */
1763 u16 udp_mark;
1764
1765 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
1766 __wsum csum;
1767
1768 /* used in skb_gro_receive() slow path */
1769 struct sk_buff *last;
1770 };
1771
1772 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
1773
1774 struct packet_type {
1775 __be16 type; /* This is really htons(ether_type). */
1776 struct net_device *dev; /* NULL is wildcarded here */
1777 int (*func) (struct sk_buff *,
1778 struct net_device *,
1779 struct packet_type *,
1780 struct net_device *);
1781 bool (*id_match)(struct packet_type *ptype,
1782 struct sock *sk);
1783 void *af_packet_priv;
1784 struct list_head list;
1785 };
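
A sketch of registering a protocol handler through this structure; FOO_ETHERTYPE (the IEEE local-experimental value 0x88b5) and foo_rcv() are assumptions, dev_add_pack()/dev_remove_pack() are the real registration calls declared further below:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define FOO_ETHERTYPE 0x88b5		/* local experimental ethertype, for illustration */

static int foo_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	/* a real handler would parse skb->data here */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type foo_packet_type = {
	.type = cpu_to_be16(FOO_ETHERTYPE),
	.func = foo_rcv,		/* .dev left NULL: receive from any device */
};

/* dev_add_pack(&foo_packet_type) from module init,
 * dev_remove_pack(&foo_packet_type) from module exit. */
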
1786
1787 struct offload_callbacks {
1788 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1789 netdev_features_t features);
1790 int (*gso_send_check)(struct sk_buff *skb);
1791 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1792 struct sk_buff *skb);
1793 int (*gro_complete)(struct sk_buff *skb, int nhoff);
1794 };
1795
1796 struct packet_offload {
1797 __be16 type; /* This is really htons(ether_type). */
1798 struct offload_callbacks callbacks;
1799 struct list_head list;
1800 };
1801
1802 struct udp_offload {
1803 __be16 port;
1804 struct offload_callbacks callbacks;
1805 };
1806
1807 /* often modified stats are per cpu, others are shared (netdev->stats) */
1808 struct pcpu_sw_netstats {
1809 u64 rx_packets;
1810 u64 rx_bytes;
1811 u64 tx_packets;
1812 u64 tx_bytes;
1813 struct u64_stats_sync syncp;
1814 };
1815
1816 #define netdev_alloc_pcpu_stats(type) \
1817 ({ \
1818 typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
1819 if (pcpu_stats) { \
1820 int i; \
1821 for_each_possible_cpu(i) { \
1822 typeof(type) *stat; \
1823 stat = per_cpu_ptr(pcpu_stats, i); \
1824 u64_stats_init(&stat->syncp); \
1825 } \
1826 } \
1827 pcpu_stats; \
1828 })
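
A sketch of how this macro is commonly used together with the pcpu_sw_netstats type above; the foo_* wrappers are assumptions, the u64_stats_update_begin/end pairing is the standard per-cpu stats convention:

#include <linux/netdevice.h>
#include <linux/percpu.h>

static int foo_stats_alloc(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void foo_count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}
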
1829
1830 #include <linux/notifier.h>
1831
1832 /* netdevice notifier chain. Please remember to update the rtnetlink
1833 * notification exclusion list in rtnetlink_event() when adding new
1834 * types.
1835 */
1836 #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
1837 #define NETDEV_DOWN 0x0002
1838 #define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
1839 detected a hardware crash and restarted
1840 - we can use this eg to kick tcp sessions
1841 once done */
1842 #define NETDEV_CHANGE 0x0004 /* Notify device state change */
1843 #define NETDEV_REGISTER 0x0005
1844 #define NETDEV_UNREGISTER 0x0006
1845 #define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */
1846 #define NETDEV_CHANGEADDR 0x0008
1847 #define NETDEV_GOING_DOWN 0x0009
1848 #define NETDEV_CHANGENAME 0x000A
1849 #define NETDEV_FEAT_CHANGE 0x000B
1850 #define NETDEV_BONDING_FAILOVER 0x000C
1851 #define NETDEV_PRE_UP 0x000D
1852 #define NETDEV_PRE_TYPE_CHANGE 0x000E
1853 #define NETDEV_POST_TYPE_CHANGE 0x000F
1854 #define NETDEV_POST_INIT 0x0010
1855 #define NETDEV_UNREGISTER_FINAL 0x0011
1856 #define NETDEV_RELEASE 0x0012
1857 #define NETDEV_NOTIFY_PEERS 0x0013
1858 #define NETDEV_JOIN 0x0014
1859 #define NETDEV_CHANGEUPPER 0x0015
1860 #define NETDEV_RESEND_IGMP 0x0016
1861 #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
1862
1863 int register_netdevice_notifier(struct notifier_block *nb);
1864 int unregister_netdevice_notifier(struct notifier_block *nb);
1865
1866 struct netdev_notifier_info {
1867 struct net_device *dev;
1868 };
1869
1870 struct netdev_notifier_change_info {
1871 struct netdev_notifier_info info; /* must be first */
1872 unsigned int flags_changed;
1873 };
1874
1875 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
1876 struct net_device *dev)
1877 {
1878 info->dev = dev;
1879 }
1880
1881 static inline struct net_device *
1882 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
1883 {
1884 return info->dev;
1885 }
1886
1887 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
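
A sketch of a netdevice notifier consumer, assuming a hypothetical subsystem that wants to react to interfaces coming up and going down; the callback retrieves the device with the netdev_notifier_info_to_dev() helper defined above:

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_debug("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		pr_debug("%s is going down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block foo_netdev_notifier = {
	.notifier_call = foo_netdev_event,
};

/* register_netdevice_notifier(&foo_netdev_notifier) at init time,
 * unregister_netdevice_notifier(&foo_netdev_notifier) on exit. */
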
1888
1889
1890 extern rwlock_t dev_base_lock; /* Device list lock */
1891
1892 #define for_each_netdev(net, d) \
1893 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
1894 #define for_each_netdev_reverse(net, d) \
1895 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
1896 #define for_each_netdev_rcu(net, d) \
1897 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
1898 #define for_each_netdev_safe(net, d, n) \
1899 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
1900 #define for_each_netdev_continue(net, d) \
1901 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
1902 #define for_each_netdev_continue_rcu(net, d) \
1903 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
1904 #define for_each_netdev_in_bond_rcu(bond, slave) \
1905 for_each_netdev_rcu(&init_net, slave) \
1906 if (netdev_master_upper_dev_get_rcu(slave) == bond)
1907 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
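
A short sketch of walking the per-namespace device list with the iterators above; the RCU variant only needs rcu_read_lock(), while the plain for_each_netdev() requires RTNL or dev_base_lock:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static void foo_dump_devices(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_debug("ifindex %d: %s\n", dev->ifindex, dev->name);
	rcu_read_unlock();
}
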
1908
1909 static inline struct net_device *next_net_device(struct net_device *dev)
1910 {
1911 struct list_head *lh;
1912 struct net *net;
1913
1914 net = dev_net(dev);
1915 lh = dev->dev_list.next;
1916 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1917 }
1918
1919 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
1920 {
1921 struct list_head *lh;
1922 struct net *net;
1923
1924 net = dev_net(dev);
1925 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
1926 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1927 }
1928
1929 static inline struct net_device *first_net_device(struct net *net)
1930 {
1931 return list_empty(&net->dev_base_head) ? NULL :
1932 net_device_entry(net->dev_base_head.next);
1933 }
1934
1935 static inline struct net_device *first_net_device_rcu(struct net *net)
1936 {
1937 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
1938
1939 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1940 }
1941
1942 int netdev_boot_setup_check(struct net_device *dev);
1943 unsigned long netdev_boot_base(const char *prefix, int unit);
1944 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1945 const char *hwaddr);
1946 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1947 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
1948 void dev_add_pack(struct packet_type *pt);
1949 void dev_remove_pack(struct packet_type *pt);
1950 void __dev_remove_pack(struct packet_type *pt);
1951 void dev_add_offload(struct packet_offload *po);
1952 void dev_remove_offload(struct packet_offload *po);
1953
1954 struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
1955 unsigned short mask);
1956 struct net_device *dev_get_by_name(struct net *net, const char *name);
1957 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
1958 struct net_device *__dev_get_by_name(struct net *net, const char *name);
1959 int dev_alloc_name(struct net_device *dev, const char *name);
1960 int dev_open(struct net_device *dev);
1961 int dev_close(struct net_device *dev);
1962 void dev_disable_lro(struct net_device *dev);
1963 int dev_loopback_xmit(struct sk_buff *newskb);
1964 int dev_queue_xmit(struct sk_buff *skb);
1965 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
1966 int register_netdevice(struct net_device *dev);
1967 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
1968 void unregister_netdevice_many(struct list_head *head);
1969 static inline void unregister_netdevice(struct net_device *dev)
1970 {
1971 unregister_netdevice_queue(dev, NULL);
1972 }
1973
1974 int netdev_refcnt_read(const struct net_device *dev);
1975 void free_netdev(struct net_device *dev);
1976 void netdev_freemem(struct net_device *dev);
1977 void synchronize_net(void);
1978 int init_dummy_netdev(struct net_device *dev);
1979
1980 struct net_device *dev_get_by_index(struct net *net, int ifindex);
1981 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
1982 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
1983 int netdev_get_name(struct net *net, char *name, int ifindex);
1984 int dev_restart(struct net_device *dev);
1985 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
1986
1987 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1988 {
1989 return NAPI_GRO_CB(skb)->data_offset;
1990 }
1991
1992 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
1993 {
1994 return skb->len - NAPI_GRO_CB(skb)->data_offset;
1995 }
1996
1997 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
1998 {
1999 NAPI_GRO_CB(skb)->data_offset += len;
2000 }
2001
2002 static inline void *skb_gro_header_fast(struct sk_buff *skb,
2003 unsigned int offset)
2004 {
2005 return NAPI_GRO_CB(skb)->frag0 + offset;
2006 }
2007
2008 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2009 {
2010 return NAPI_GRO_CB(skb)->frag0_len < hlen;
2011 }
2012
2013 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2014 unsigned int offset)
2015 {
2016 if (!pskb_may_pull(skb, hlen))
2017 return NULL;
2018
2019 NAPI_GRO_CB(skb)->frag0 = NULL;
2020 NAPI_GRO_CB(skb)->frag0_len = 0;
2021 return skb->data + offset;
2022 }
2023
2024 static inline void *skb_gro_network_header(struct sk_buff *skb)
2025 {
2026 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2027 skb_network_offset(skb);
2028 }
2029
2030 static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2031 const void *start, unsigned int len)
2032 {
2033 if (skb->ip_summed == CHECKSUM_COMPLETE)
2034 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2035 csum_partial(start, len, 0));
2036 }
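
A sketch of the header-access pattern GRO callbacks typically build on these helpers: try the frag0 fast path first and fall back to skb_gro_header_slow() only when the requested length is not available there. foo_gro_ip_header() is an illustrative wrapper, not a kernel function:

#include <linux/netdevice.h>
#include <linux/ip.h>

static const struct iphdr *foo_gro_ip_header(struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct iphdr);
	const struct iphdr *iph;

	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		iph = skb_gro_header_slow(skb, hlen, off);	/* may return NULL */

	return iph;
}
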
2037
2038 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2039 unsigned short type,
2040 const void *daddr, const void *saddr,
2041 unsigned int len)
2042 {
2043 if (!dev->header_ops || !dev->header_ops->create)
2044 return 0;
2045
2046 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
2047 }
2048
2049 static inline int dev_parse_header(const struct sk_buff *skb,
2050 unsigned char *haddr)
2051 {
2052 const struct net_device *dev = skb->dev;
2053
2054 if (!dev->header_ops || !dev->header_ops->parse)
2055 return 0;
2056 return dev->header_ops->parse(skb, haddr);
2057 }
2058
2059 static inline int dev_rebuild_header(struct sk_buff *skb)
2060 {
2061 const struct net_device *dev = skb->dev;
2062
2063 if (!dev->header_ops || !dev->header_ops->rebuild)
2064 return 0;
2065 return dev->header_ops->rebuild(skb);
2066 }
2067
2068 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
2069 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
2070 static inline int unregister_gifconf(unsigned int family)
2071 {
2072 return register_gifconf(family, NULL);
2073 }
2074
2075 #ifdef CONFIG_NET_FLOW_LIMIT
2076 #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
2077 struct sd_flow_limit {
2078 u64 count;
2079 unsigned int num_buckets;
2080 unsigned int history_head;
2081 u16 history[FLOW_LIMIT_HISTORY];
2082 u8 buckets[];
2083 };
2084
2085 extern int netdev_flow_limit_table_len;
2086 #endif /* CONFIG_NET_FLOW_LIMIT */
2087
2088 /*
2089 * Incoming packets are placed on per-cpu queues
2090 */
2091 struct softnet_data {
2092 struct Qdisc *output_queue;
2093 struct Qdisc **output_queue_tailp;
2094 struct list_head poll_list;
2095 struct sk_buff *completion_queue;
2096 struct sk_buff_head process_queue;
2097
2098 /* stats */
2099 unsigned int processed;
2100 unsigned int time_squeeze;
2101 unsigned int cpu_collision;
2102 unsigned int received_rps;
2103
2104 #ifdef CONFIG_RPS
2105 struct softnet_data *rps_ipi_list;
2106
2107 /* Elements below can be accessed between CPUs for RPS */
2108 struct call_single_data csd ____cacheline_aligned_in_smp;
2109 struct softnet_data *rps_ipi_next;
2110 unsigned int cpu;
2111 unsigned int input_queue_head;
2112 unsigned int input_queue_tail;
2113 #endif
2114 unsigned int dropped;
2115 struct sk_buff_head input_pkt_queue;
2116 struct napi_struct backlog;
2117
2118 #ifdef CONFIG_NET_FLOW_LIMIT
2119 struct sd_flow_limit __rcu *flow_limit;
2120 #endif
2121 };
2122
2123 static inline void input_queue_head_incr(struct softnet_data *sd)
2124 {
2125 #ifdef CONFIG_RPS
2126 sd->input_queue_head++;
2127 #endif
2128 }
2129
2130 static inline void input_queue_tail_incr_save(struct softnet_data *sd,
2131 unsigned int *qtail)
2132 {
2133 #ifdef CONFIG_RPS
2134 *qtail = ++sd->input_queue_tail;
2135 #endif
2136 }
2137
2138 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
2139
2140 void __netif_schedule(struct Qdisc *q);
2141
2142 static inline void netif_schedule_queue(struct netdev_queue *txq)
2143 {
2144 if (!(txq->state & QUEUE_STATE_ANY_XOFF))
2145 __netif_schedule(txq->qdisc);
2146 }
2147
2148 static inline void netif_tx_schedule_all(struct net_device *dev)
2149 {
2150 unsigned int i;
2151
2152 for (i = 0; i < dev->num_tx_queues; i++)
2153 netif_schedule_queue(netdev_get_tx_queue(dev, i));
2154 }
2155
2156 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
2157 {
2158 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2159 }
2160
2161 /**
2162 * netif_start_queue - allow transmit
2163 * @dev: network device
2164 *
2165 * Allow upper layers to call the device hard_start_xmit routine.
2166 */
2167 static inline void netif_start_queue(struct net_device *dev)
2168 {
2169 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
2170 }
2171
2172 static inline void netif_tx_start_all_queues(struct net_device *dev)
2173 {
2174 unsigned int i;
2175
2176 for (i = 0; i < dev->num_tx_queues; i++) {
2177 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2178 netif_tx_start_queue(txq);
2179 }
2180 }
2181
2182 static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2183 {
2184 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
2185 __netif_schedule(dev_queue->qdisc);
2186 }
2187
2188 /**
2189 * netif_wake_queue - restart transmit
2190 * @dev: network device
2191 *
2192 * Allow upper layers to call the device hard_start_xmit routine.
2193 * Used for flow control when transmit resources are available.
2194 */
2195 static inline void netif_wake_queue(struct net_device *dev)
2196 {
2197 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
2198 }
2199
2200 static inline void netif_tx_wake_all_queues(struct net_device *dev)
2201 {
2202 unsigned int i;
2203
2204 for (i = 0; i < dev->num_tx_queues; i++) {
2205 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2206 netif_tx_wake_queue(txq);
2207 }
2208 }
2209
2210 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
2211 {
2212 if (WARN_ON(!dev_queue)) {
2213 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
2214 return;
2215 }
2216 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2217 }
2218
2219 /**
2220 * netif_stop_queue - stop transmit queue
2221 * @dev: network device
2222 *
2223 * Stop upper layers calling the device hard_start_xmit routine.
2224 * Used for flow control when transmit resources are unavailable.
2225 */
2226 static inline void netif_stop_queue(struct net_device *dev)
2227 {
2228 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
2229 }
2230
2231 static inline void netif_tx_stop_all_queues(struct net_device *dev)
2232 {
2233 unsigned int i;
2234
2235 for (i = 0; i < dev->num_tx_queues; i++) {
2236 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2237 netif_tx_stop_queue(txq);
2238 }
2239 }
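
A sketch of the usual flow-control pattern built on the stop/wake helpers above: stop the queue from the xmit path when the (hypothetical) ring fills, and wake it again from TX completion once there is room. The foo_priv ring bookkeeping is an assumption:

#include <linux/netdevice.h>

struct foo_priv {
	unsigned int tx_used;		/* hypothetical descriptors in flight */
	unsigned int tx_size;		/* hypothetical ring size */
};

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *p = netdev_priv(dev);

	/* ... post skb to hardware ... */
	if (++p->tx_used >= p->tx_size)
		netif_stop_queue(dev);		/* ring full: stack stops calling xmit */

	return NETDEV_TX_OK;
}

static void foo_tx_complete(struct net_device *dev)
{
	struct foo_priv *p = netdev_priv(dev);

	/* ... reclaim completed descriptors, decrementing p->tx_used ... */
	if (netif_queue_stopped(dev) && p->tx_used < p->tx_size)
		netif_wake_queue(dev);		/* resume transmission */
}
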
2240
2241 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
2242 {
2243 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2244 }
2245
2246 /**
2247 * netif_queue_stopped - test if transmit queue is flowblocked
2248 * @dev: network device
2249 *
2250 * Test if transmit queue on device is currently unable to send.
2251 */
2252 static inline bool netif_queue_stopped(const struct net_device *dev)
2253 {
2254 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
2255 }
2256
2257 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
2258 {
2259 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
2260 }
2261
2262 static inline bool
2263 netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
2264 {
2265 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
2266 }
2267
2268 static inline bool
2269 netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
2270 {
2271 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
2272 }
2273
2274 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2275 unsigned int bytes)
2276 {
2277 #ifdef CONFIG_BQL
2278 dql_queued(&dev_queue->dql, bytes);
2279
2280 if (likely(dql_avail(&dev_queue->dql) >= 0))
2281 return;
2282
2283 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2284
2285 /*
2286 * The XOFF flag must be set before checking the dql_avail below,
2287 * because in netdev_tx_completed_queue we update the dql_completed
2288 * before checking the XOFF flag.
2289 */
2290 smp_mb();
2291
2292 /* check again in case another CPU has just made room avail */
2293 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
2294 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2295 #endif
2296 }
2297
2298 /**
2299 * netdev_sent_queue - report the number of bytes queued to hardware
2300 * @dev: network device
2301 * @bytes: number of bytes queued to the hardware device queue
2302 *
2303 * Report the number of bytes queued for sending/completion to the network
2304 * device hardware queue. @bytes should be a good approximation and should
2305 * exactly match netdev_completed_queue() @bytes
2306 */
2307 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
2308 {
2309 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
2310 }
2311
2312 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
2313 unsigned int pkts, unsigned int bytes)
2314 {
2315 #ifdef CONFIG_BQL
2316 if (unlikely(!bytes))
2317 return;
2318
2319 dql_completed(&dev_queue->dql, bytes);
2320
2321 /*
2322 * Without the memory barrier there is a small possibility that
2323 * netdev_tx_sent_queue will miss the update and cause the queue to
2324 * be stopped forever
2325 */
2326 smp_mb();
2327
2328 if (dql_avail(&dev_queue->dql) < 0)
2329 return;
2330
2331 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
2332 netif_schedule_queue(dev_queue);
2333 #endif
2334 }
2335
2336 /**
2337 * netdev_completed_queue - report bytes and packets completed by device
2338 * @dev: network device
2339 * @pkts: actual number of packets sent over the medium
2340 * @bytes: actual number of bytes sent over the medium
2341 *
2342 * Report the number of bytes and packets transmitted by the network device
2343 * hardware queue over the physical medium, @bytes must exactly match the
2344 * @bytes amount passed to netdev_sent_queue()
2345 */
2346 static inline void netdev_completed_queue(struct net_device *dev,
2347 unsigned int pkts, unsigned int bytes)
2348 {
2349 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
2350 }
2351
2352 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
2353 {
2354 #ifdef CONFIG_BQL
2355 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
2356 dql_reset(&q->dql);
2357 #endif
2358 }
2359
2360 /**
2361 * netdev_reset_queue - reset the packets and bytes count of a network device
2362 * @dev_queue: network device
2363 *
2364 * Reset the bytes and packet count of a network device and clear the
2365 * software flow control OFF bit for this network device
2366 */
2367 static inline void netdev_reset_queue(struct net_device *dev_queue)
2368 {
2369 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
2370 }
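
A sketch of byte-queue-limit accounting with the helpers above for the single-queue case: report queued bytes in the xmit path, completed bytes from the TX completion path, and reset when the ring is torn down. The foo_* wrappers are assumptions:

#include <linux/netdevice.h>

static void foo_bql_xmit(struct net_device *dev, struct sk_buff *skb)
{
	/* after the skb has been handed to hardware */
	netdev_sent_queue(dev, skb->len);
}

static void foo_bql_tx_complete(struct net_device *dev,
				unsigned int pkts, unsigned int bytes)
{
	/* totals for all descriptors reclaimed in this completion pass */
	netdev_completed_queue(dev, pkts, bytes);
}

static void foo_bql_ring_reset(struct net_device *dev)
{
	netdev_reset_queue(dev);	/* forget in-flight accounting */
}
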
2371
2372 /**
2373 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
2374 * @dev: network device
2375 * @queue_index: given tx queue index
2376 *
2377 * Returns 0 if given tx queue index >= number of device tx queues,
2378 * otherwise returns the originally passed tx queue index.
2379 */
2380 static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
2381 {
2382 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2383 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2384 dev->name, queue_index,
2385 dev->real_num_tx_queues);
2386 return 0;
2387 }
2388
2389 return queue_index;
2390 }
2391
2392 /**
2393 * netif_running - test if up
2394 * @dev: network device
2395 *
2396 * Test if the device has been brought up.
2397 */
2398 static inline bool netif_running(const struct net_device *dev)
2399 {
2400 return test_bit(__LINK_STATE_START, &dev->state);
2401 }
2402
2403 /*
2404 * Routines to manage the subqueues on a device. We only need start,
2405 * stop, and a check if it's stopped. All other device management is
2406 * done at the overall netdevice level.
2407 * Also test the device if we're multiqueue.
2408 */
2409
2410 /**
2411 * netif_start_subqueue - allow sending packets on subqueue
2412 * @dev: network device
2413 * @queue_index: sub queue index
2414 *
2415 * Start individual transmit queue of a device with multiple transmit queues.
2416 */
2417 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
2418 {
2419 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2420
2421 netif_tx_start_queue(txq);
2422 }
2423
2424 /**
2425 * netif_stop_subqueue - stop sending packets on subqueue
2426 * @dev: network device
2427 * @queue_index: sub queue index
2428 *
2429 * Stop individual transmit queue of a device with multiple transmit queues.
2430 */
2431 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
2432 {
2433 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2434 netif_tx_stop_queue(txq);
2435 }
2436
2437 /**
2438 * netif_subqueue_stopped - test status of subqueue
2439 * @dev: network device
2440 * @queue_index: sub queue index
2441 *
2442 * Check individual transmit queue of a device with multiple transmit queues.
2443 */
2444 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
2445 u16 queue_index)
2446 {
2447 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2448
2449 return netif_tx_queue_stopped(txq);
2450 }
2451
2452 static inline bool netif_subqueue_stopped(const struct net_device *dev,
2453 struct sk_buff *skb)
2454 {
2455 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
2456 }
2457
2458 /**
2459 * netif_wake_subqueue - allow sending packets on subqueue
2460 * @dev: network device
2461 * @queue_index: sub queue index
2462 *
2463 * Resume individual transmit queue of a device with multiple transmit queues.
2464 */
2465 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2466 {
2467 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2468 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
2469 __netif_schedule(txq->qdisc);
2470 }
2471
2472 #ifdef CONFIG_XPS
2473 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2474 u16 index);
2475 #else
2476 static inline int netif_set_xps_queue(struct net_device *dev,
2477 const struct cpumask *mask,
2478 u16 index)
2479 {
2480 return 0;
2481 }
2482 #endif
2483
2484 /*
2485 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
2486 * as a distribution range limit for the returned value.
2487 */
2488 static inline u16 skb_tx_hash(const struct net_device *dev,
2489 const struct sk_buff *skb)
2490 {
2491 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
2492 }
2493
2494 /**
2495 * netif_is_multiqueue - test if device has multiple transmit queues
2496 * @dev: network device
2497 *
2498 * Check if device has multiple transmit queues
2499 */
2500 static inline bool netif_is_multiqueue(const struct net_device *dev)
2501 {
2502 return dev->num_tx_queues > 1;
2503 }
2504
2505 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
2506
2507 #ifdef CONFIG_SYSFS
2508 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
2509 #else
2510 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
2511 unsigned int rxq)
2512 {
2513 return 0;
2514 }
2515 #endif
2516
2517 static inline int netif_copy_real_num_queues(struct net_device *to_dev,
2518 const struct net_device *from_dev)
2519 {
2520 int err;
2521
2522 err = netif_set_real_num_tx_queues(to_dev,
2523 from_dev->real_num_tx_queues);
2524 if (err)
2525 return err;
2526 #ifdef CONFIG_SYSFS
2527 return netif_set_real_num_rx_queues(to_dev,
2528 from_dev->real_num_rx_queues);
2529 #else
2530 return 0;
2531 #endif
2532 }
2533
2534 #ifdef CONFIG_SYSFS
2535 static inline unsigned int get_netdev_rx_queue_index(
2536 struct netdev_rx_queue *queue)
2537 {
2538 struct net_device *dev = queue->dev;
2539 int index = queue - dev->_rx;
2540
2541 BUG_ON(index >= dev->num_rx_queues);
2542 return index;
2543 }
2544 #endif
2545
2546 #define DEFAULT_MAX_NUM_RSS_QUEUES (8)
2547 int netif_get_num_default_rss_queues(void);
2548
2549 enum skb_free_reason {
2550 SKB_REASON_CONSUMED,
2551 SKB_REASON_DROPPED,
2552 };
2553
2554 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
2555 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
2556
2557 /*
2558 * It is not allowed to call kfree_skb() or consume_skb() from hardware
2559 * interrupt context or with hardware interrupts being disabled.
2560 * (in_irq() || irqs_disabled())
2561 *
2562 * We provide four helpers that can be used in following contexts :
2563 *
2564 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
2565 * replacing kfree_skb(skb)
2566 *
2567 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
2568 * Typically used in place of consume_skb(skb) in TX completion path
2569 *
2570 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
2571 * replacing kfree_skb(skb)
2572 *
2573 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
2574 * and consumed a packet. Used in place of consume_skb(skb)
2575 */
2576 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
2577 {
2578 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
2579 }
2580
2581 static inline void dev_consume_skb_irq(struct sk_buff *skb)
2582 {
2583 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
2584 }
2585
2586 static inline void dev_kfree_skb_any(struct sk_buff *skb)
2587 {
2588 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
2589 }
2590
2591 static inline void dev_consume_skb_any(struct sk_buff *skb)
2592 {
2593 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
2594 }
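
A short sketch of choosing between the helpers above in a TX completion handler that may run in hard-irq context: successfully transmitted packets are "consumed", packets freed on error count as drops. foo_free_tx_skb() is illustrative only:

#include <linux/netdevice.h>

static void foo_free_tx_skb(struct sk_buff *skb, bool tx_ok)
{
	if (tx_ok)
		dev_consume_skb_any(skb);	/* normal completion, not a drop */
	else
		dev_kfree_skb_any(skb);		/* counted as a drop for tracing */
}
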
2595
2596 int netif_rx(struct sk_buff *skb);
2597 int netif_rx_ni(struct sk_buff *skb);
2598 int netif_receive_skb(struct sk_buff *skb);
2599 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
2600 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
2601 struct sk_buff *napi_get_frags(struct napi_struct *napi);
2602 gro_result_t napi_gro_frags(struct napi_struct *napi);
2603 struct packet_offload *gro_find_receive_by_type(__be16 type);
2604 struct packet_offload *gro_find_complete_by_type(__be16 type);
2605
2606 static inline void napi_free_frags(struct napi_struct *napi)
2607 {
2608 kfree_skb(napi->skb);
2609 napi->skb = NULL;
2610 }
2611
2612 int netdev_rx_handler_register(struct net_device *dev,
2613 rx_handler_func_t *rx_handler,
2614 void *rx_handler_data);
2615 void netdev_rx_handler_unregister(struct net_device *dev);
2616
2617 bool dev_valid_name(const char *name);
2618 int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
2619 int dev_ethtool(struct net *net, struct ifreq *);
2620 unsigned int dev_get_flags(const struct net_device *);
2621 int __dev_change_flags(struct net_device *, unsigned int flags);
2622 int dev_change_flags(struct net_device *, unsigned int);
2623 void __dev_notify_flags(struct net_device *, unsigned int old_flags,
2624 unsigned int gchanges);
2625 int dev_change_name(struct net_device *, const char *);
2626 int dev_set_alias(struct net_device *, const char *, size_t);
2627 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
2628 int dev_set_mtu(struct net_device *, int);
2629 void dev_set_group(struct net_device *, int);
2630 int dev_set_mac_address(struct net_device *, struct sockaddr *);
2631 int dev_change_carrier(struct net_device *, bool new_carrier);
2632 int dev_get_phys_port_id(struct net_device *dev,
2633 struct netdev_phys_port_id *ppid);
2634 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2635 struct netdev_queue *txq);
2636 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2637 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2638 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
2639
2640 extern int netdev_budget;
2641
2642 /* Called by rtnetlink.c:rtnl_unlock() */
2643 void netdev_run_todo(void);
2644
2645 /**
2646 * dev_put - release reference to device
2647 * @dev: network device
2648 *
2649 * Release reference to device to allow it to be freed.
2650 */
2651 static inline void dev_put(struct net_device *dev)
2652 {
2653 this_cpu_dec(*dev->pcpu_refcnt);
2654 }
2655
2656 /**
2657 * dev_hold - get reference to device
2658 * @dev: network device
2659 *
2660 * Hold reference to device to keep it from being freed.
2661 */
2662 static inline void dev_hold(struct net_device *dev)
2663 {
2664 this_cpu_inc(*dev->pcpu_refcnt);
2665 }
2666
2667 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
2668 * and _off may be called from IRQ context, but it is the caller
2669 * who is responsible for serialization of these calls.
2670 *
2671 * The name carrier is inappropriate, these functions should really be
2672 * called netif_lowerlayer_*() because they represent the state of any
2673 * kind of lower layer not just hardware media.
2674 */
2675
2676 void linkwatch_init_dev(struct net_device *dev);
2677 void linkwatch_fire_event(struct net_device *dev);
2678 void linkwatch_forget_dev(struct net_device *dev);
2679
2680 /**
2681 * netif_carrier_ok - test if carrier present
2682 * @dev: network device
2683 *
2684 * Check if carrier is present on device
2685 */
2686 static inline bool netif_carrier_ok(const struct net_device *dev)
2687 {
2688 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
2689 }
2690
2691 unsigned long dev_trans_start(struct net_device *dev);
2692
2693 void __netdev_watchdog_up(struct net_device *dev);
2694
2695 void netif_carrier_on(struct net_device *dev);
2696
2697 void netif_carrier_off(struct net_device *dev);
2698
2699 /**
2700 * netif_dormant_on - mark device as dormant.
2701 * @dev: network device
2702 *
2703 * Mark device as dormant (as per RFC2863).
2704 *
2705 * The dormant state indicates that the relevant interface is not
2706 * actually in a condition to pass packets (i.e., it is not 'up') but is
2707 * in a "pending" state, waiting for some external event. For "on-
2708 * demand" interfaces, this new state identifies the situation where the
2709 * interface is waiting for events to place it in the up state.
2710 *
2711 */
2712 static inline void netif_dormant_on(struct net_device *dev)
2713 {
2714 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
2715 linkwatch_fire_event(dev);
2716 }
2717
2718 /**
2719 * netif_dormant_off - set device as not dormant.
2720 * @dev: network device
2721 *
2722 * Device is not in dormant state.
2723 */
2724 static inline void netif_dormant_off(struct net_device *dev)
2725 {
2726 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
2727 linkwatch_fire_event(dev);
2728 }
2729
2730 /**
2731 * netif_dormant - test if device is dormant
2732 * @dev: network device
2733 *
2734 * Check if the device is in dormant state
2735 */
2736 static inline bool netif_dormant(const struct net_device *dev)
2737 {
2738 return test_bit(__LINK_STATE_DORMANT, &dev->state);
2739 }
2740
2741
2742 /**
2743 * netif_oper_up - test if device is operational
2744 * @dev: network device
2745 *
2746 * Check if carrier is operational
2747 */
2748 static inline bool netif_oper_up(const struct net_device *dev)
2749 {
2750 return (dev->operstate == IF_OPER_UP ||
2751 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
2752 }
2753
2754 /**
2755 * netif_device_present - is device available or removed
2756 * @dev: network device
2757 *
2758 * Check if device has not been removed from system.
2759 */
2760 static inline bool netif_device_present(struct net_device *dev)
2761 {
2762 return test_bit(__LINK_STATE_PRESENT, &dev->state);
2763 }
2764
2765 void netif_device_detach(struct net_device *dev);
2766
2767 void netif_device_attach(struct net_device *dev);
2768
2769 /*
2770 * Network interface message level settings
2771 */
2772
2773 enum {
2774 NETIF_MSG_DRV = 0x0001,
2775 NETIF_MSG_PROBE = 0x0002,
2776 NETIF_MSG_LINK = 0x0004,
2777 NETIF_MSG_TIMER = 0x0008,
2778 NETIF_MSG_IFDOWN = 0x0010,
2779 NETIF_MSG_IFUP = 0x0020,
2780 NETIF_MSG_RX_ERR = 0x0040,
2781 NETIF_MSG_TX_ERR = 0x0080,
2782 NETIF_MSG_TX_QUEUED = 0x0100,
2783 NETIF_MSG_INTR = 0x0200,
2784 NETIF_MSG_TX_DONE = 0x0400,
2785 NETIF_MSG_RX_STATUS = 0x0800,
2786 NETIF_MSG_PKTDATA = 0x1000,
2787 NETIF_MSG_HW = 0x2000,
2788 NETIF_MSG_WOL = 0x4000,
2789 };
2790
2791 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
2792 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
2793 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
2794 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
2795 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
2796 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
2797 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
2798 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
2799 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
2800 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
2801 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
2802 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
2803 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
2804 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
2805 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
2806
2807 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
2808 {
2809 /* use default */
2810 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
2811 return default_msg_enable_bits;
2812 if (debug_value == 0) /* no output */
2813 return 0;
2814 /* set low N bits */
2815 return (1 << debug_value) - 1;
2816 }
2817
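/* Illustrative usage sketch, not part of this header: a driver typically
 * feeds a module "debug" parameter into netif_msg_init() and then gates
 * its log output with the netif_msg_*() tests above. The example_priv
 * structure and names are hypothetical.
 */
struct example_priv {
	struct net_device *netdev;
	u32 msg_enable;
};

static int example_debug = -1;		/* -1 selects the driver defaults */

static void example_init_msg_level(struct example_priv *priv)
{
	priv->msg_enable = netif_msg_init(example_debug,
					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);
}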
2818 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
2819 {
2820 spin_lock(&txq->_xmit_lock);
2821 txq->xmit_lock_owner = cpu;
2822 }
2823
2824 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
2825 {
2826 spin_lock_bh(&txq->_xmit_lock);
2827 txq->xmit_lock_owner = smp_processor_id();
2828 }
2829
2830 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
2831 {
2832 bool ok = spin_trylock(&txq->_xmit_lock);
2833 if (likely(ok))
2834 txq->xmit_lock_owner = smp_processor_id();
2835 return ok;
2836 }
2837
2838 static inline void __netif_tx_unlock(struct netdev_queue *txq)
2839 {
2840 txq->xmit_lock_owner = -1;
2841 spin_unlock(&txq->_xmit_lock);
2842 }
2843
2844 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
2845 {
2846 txq->xmit_lock_owner = -1;
2847 spin_unlock_bh(&txq->_xmit_lock);
2848 }
2849
2850 static inline void txq_trans_update(struct netdev_queue *txq)
2851 {
2852 if (txq->xmit_lock_owner != -1)
2853 txq->trans_start = jiffies;
2854 }
2855
2856 /**
2857 * netif_tx_lock - grab network device transmit lock
2858 * @dev: network device
2859 *
2860 * Get network device transmit lock
2861 */
2862 static inline void netif_tx_lock(struct net_device *dev)
2863 {
2864 unsigned int i;
2865 int cpu;
2866
2867 spin_lock(&dev->tx_global_lock);
2868 cpu = smp_processor_id();
2869 for (i = 0; i < dev->num_tx_queues; i++) {
2870 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2871
2872 /* We are the only thread of execution doing a
2873 * freeze, but we have to grab the _xmit_lock in
2874 * order to synchronize with threads which are in
2875 * the ->hard_start_xmit() handler and already
2876 * checked the frozen bit.
2877 */
2878 __netif_tx_lock(txq, cpu);
2879 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
2880 __netif_tx_unlock(txq);
2881 }
2882 }
2883
2884 static inline void netif_tx_lock_bh(struct net_device *dev)
2885 {
2886 local_bh_disable();
2887 netif_tx_lock(dev);
2888 }
2889
2890 static inline void netif_tx_unlock(struct net_device *dev)
2891 {
2892 unsigned int i;
2893
2894 for (i = 0; i < dev->num_tx_queues; i++) {
2895 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2896
2897 /* No need to grab the _xmit_lock here. If the
2898 * queue is not stopped for another reason, we
2899 * force a schedule.
2900 */
2901 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
2902 netif_schedule_queue(txq);
2903 }
2904 spin_unlock(&dev->tx_global_lock);
2905 }
2906
2907 static inline void netif_tx_unlock_bh(struct net_device *dev)
2908 {
2909 netif_tx_unlock(dev);
2910 local_bh_enable();
2911 }
2912
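/* Illustrative usage sketch, not part of this header: freezing all TX
 * queues around a device-wide reconfiguration. The function name is
 * hypothetical.
 */
static void example_reconfigure_tx(struct net_device *dev)
{
	netif_tx_lock_bh(dev);		/* freeze every TX queue */
	/* ... reprogram TX rings while nothing can transmit ... */
	netif_tx_unlock_bh(dev);	/* unfreeze and reschedule the queues */
}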
2913 #define HARD_TX_LOCK(dev, txq, cpu) { \
2914 if ((dev->features & NETIF_F_LLTX) == 0) { \
2915 __netif_tx_lock(txq, cpu); \
2916 } \
2917 }
2918
2919 #define HARD_TX_TRYLOCK(dev, txq) \
2920 (((dev->features & NETIF_F_LLTX) == 0) ? \
2921 __netif_tx_trylock(txq) : \
2922 true )
2923
2924 #define HARD_TX_UNLOCK(dev, txq) { \
2925 if ((dev->features & NETIF_F_LLTX) == 0) { \
2926 __netif_tx_unlock(txq); \
2927 } \
2928 }
2929
2930 static inline void netif_tx_disable(struct net_device *dev)
2931 {
2932 unsigned int i;
2933 int cpu;
2934
2935 local_bh_disable();
2936 cpu = smp_processor_id();
2937 for (i = 0; i < dev->num_tx_queues; i++) {
2938 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2939
2940 __netif_tx_lock(txq, cpu);
2941 netif_tx_stop_queue(txq);
2942 __netif_tx_unlock(txq);
2943 }
2944 local_bh_enable();
2945 }
2946
2947 static inline void netif_addr_lock(struct net_device *dev)
2948 {
2949 spin_lock(&dev->addr_list_lock);
2950 }
2951
2952 static inline void netif_addr_lock_nested(struct net_device *dev)
2953 {
2954 int subclass = SINGLE_DEPTH_NESTING;
2955
2956 if (dev->netdev_ops->ndo_get_lock_subclass)
2957 subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
2958
2959 spin_lock_nested(&dev->addr_list_lock, subclass);
2960 }
2961
2962 static inline void netif_addr_lock_bh(struct net_device *dev)
2963 {
2964 spin_lock_bh(&dev->addr_list_lock);
2965 }
2966
2967 static inline void netif_addr_unlock(struct net_device *dev)
2968 {
2969 spin_unlock(&dev->addr_list_lock);
2970 }
2971
2972 static inline void netif_addr_unlock_bh(struct net_device *dev)
2973 {
2974 spin_unlock_bh(&dev->addr_list_lock);
2975 }
2976
2977 /*
2978 * dev_addrs walker. Should be used only for read access. Call with
2979 * rcu_read_lock held.
2980 */
2981 #define for_each_dev_addr(dev, ha) \
2982 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
2983
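/* Illustrative usage sketch, not part of this header: walking the device
 * address list read-only, under rcu_read_lock() as required above. The
 * function name is hypothetical.
 */
static void example_dump_dev_addrs(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	rcu_read_lock();
	for_each_dev_addr(dev, ha)
		pr_info("%s: addr %pM type %d\n", dev->name, ha->addr, ha->type);
	rcu_read_unlock();
}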
2984 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
2985
2986 void ether_setup(struct net_device *dev);
2987
2988 /* Support for loadable net-drivers */
2989 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
2990 void (*setup)(struct net_device *),
2991 unsigned int txqs, unsigned int rxqs);
2992 #define alloc_netdev(sizeof_priv, name, setup) \
2993 alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
2994
2995 #define alloc_netdev_mq(sizeof_priv, name, setup, count) \
2996 alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
2997
2998 int register_netdev(struct net_device *dev);
2999 void unregister_netdev(struct net_device *dev);
3000
3001 /* General hardware address lists handling functions */
3002 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3003 struct netdev_hw_addr_list *from_list, int addr_len);
3004 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3005 struct netdev_hw_addr_list *from_list, int addr_len);
3006 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
3007 struct net_device *dev,
3008 int (*sync)(struct net_device *, const unsigned char *),
3009 int (*unsync)(struct net_device *,
3010 const unsigned char *));
3011 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
3012 struct net_device *dev,
3013 int (*unsync)(struct net_device *,
3014 const unsigned char *));
3015 void __hw_addr_init(struct netdev_hw_addr_list *list);
3016
3017 /* Functions used for device addresses handling */
3018 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
3019 unsigned char addr_type);
3020 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
3021 unsigned char addr_type);
3022 void dev_addr_flush(struct net_device *dev);
3023 int dev_addr_init(struct net_device *dev);
3024
3025 /* Functions used for unicast addresses handling */
3026 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
3027 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
3028 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
3029 int dev_uc_sync(struct net_device *to, struct net_device *from);
3030 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
3031 void dev_uc_unsync(struct net_device *to, struct net_device *from);
3032 void dev_uc_flush(struct net_device *dev);
3033 void dev_uc_init(struct net_device *dev);
3034
3035 /**
3036 * __dev_uc_sync - Synchronize device's unicast list
3037 * @dev: device to sync
3038 * @sync: function to call if address should be added
3039 * @unsync: function to call if address should be removed
3040 *
3041 * Add newly added addresses to the interface, and release
3042 * addresses that have been deleted.
3043 **/
3044 static inline int __dev_uc_sync(struct net_device *dev,
3045 int (*sync)(struct net_device *,
3046 const unsigned char *),
3047 int (*unsync)(struct net_device *,
3048 const unsigned char *))
3049 {
3050 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
3051 }
3052
3053 /**
3054 * __dev_uc_unsync - Remove synchronized addresses from device
3055 * @dev: device to sync
3056 * @unsync: function to call if address should be removed
3057 *
3058 * Remove all addresses that were added to the device by dev_uc_sync().
3059 **/
3060 static inline void __dev_uc_unsync(struct net_device *dev,
3061 int (*unsync)(struct net_device *,
3062 const unsigned char *))
3063 {
3064 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
3065 }
3066
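/* Illustrative usage sketch, not part of this header: a driver that keeps
 * its own hardware unicast filter can call __dev_uc_sync() from its
 * ndo_set_rx_mode() callback with per-address sync/unsync helpers. All
 * names below are hypothetical.
 */
static int example_uc_sync(struct net_device *dev, const unsigned char *addr)
{
	/* program one unicast address into the (hypothetical) hardware filter */
	return 0;
}

static int example_uc_unsync(struct net_device *dev, const unsigned char *addr)
{
	/* remove one unicast address from the (hypothetical) hardware filter */
	return 0;
}

static void example_set_rx_mode(struct net_device *dev)
{
	__dev_uc_sync(dev, example_uc_sync, example_uc_unsync);
}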
3067 /* Functions used for multicast addresses handling */
3068 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
3069 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
3070 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
3071 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
3072 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
3073 int dev_mc_sync(struct net_device *to, struct net_device *from);
3074 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
3075 void dev_mc_unsync(struct net_device *to, struct net_device *from);
3076 void dev_mc_flush(struct net_device *dev);
3077 void dev_mc_init(struct net_device *dev);
3078
3079 /**
3080 * __dev_mc_sync - Synchronize device's multicast list
3081 * @dev: device to sync
3082 * @sync: function to call if address should be added
3083 * @unsync: function to call if address should be removed
3084 *
3085 * Add newly added addresses to the interface, and release
3086 * addresses that have been deleted.
3087 **/
3088 static inline int __dev_mc_sync(struct net_device *dev,
3089 int (*sync)(struct net_device *,
3090 const unsigned char *),
3091 int (*unsync)(struct net_device *,
3092 const unsigned char *))
3093 {
3094 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
3095 }
3096
3097 /**
3098 * __dev_mc_unsync - Remove synchronized addresses from device
3099 * @dev: device to sync
3100 * @unsync: function to call if address should be removed
3101 *
3102 * Remove all addresses that were added to the device by dev_mc_sync().
3103 **/
3104 static inline void __dev_mc_unsync(struct net_device *dev,
3105 int (*unsync)(struct net_device *,
3106 const unsigned char *))
3107 {
3108 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
3109 }
3110
3111 /* Functions used for secondary unicast and multicast support */
3112 void dev_set_rx_mode(struct net_device *dev);
3113 void __dev_set_rx_mode(struct net_device *dev);
3114 int dev_set_promiscuity(struct net_device *dev, int inc);
3115 int dev_set_allmulti(struct net_device *dev, int inc);
3116 void netdev_state_change(struct net_device *dev);
3117 void netdev_notify_peers(struct net_device *dev);
3118 void netdev_features_change(struct net_device *dev);
3119 /* Load a device via the kmod */
3120 void dev_load(struct net *net, const char *name);
3121 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
3122 struct rtnl_link_stats64 *storage);
3123 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
3124 const struct net_device_stats *netdev_stats);
3125
3126 extern int netdev_max_backlog;
3127 extern int netdev_tstamp_prequeue;
3128 extern int weight_p;
3129 extern int bpf_jit_enable;
3130
3131 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
3132 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
3133 struct list_head **iter);
3134 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
3135 struct list_head **iter);
3136
3137 /* iterate through upper list, must be called under RCU read lock */
3138 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
3139 for (iter = &(dev)->adj_list.upper, \
3140 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
3141 updev; \
3142 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
3143
3144 /* iterate through upper list, must be called under RCU read lock */
3145 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
3146 for (iter = &(dev)->all_adj_list.upper, \
3147 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
3148 updev; \
3149 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
3150
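/* Illustrative usage sketch, not part of this header: iterating a device's
 * upper devices; the caller must hold the RCU read lock as noted above.
 * The function name is hypothetical.
 */
static void example_walk_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		pr_info("%s: upper device %s\n", dev->name, upper->name);
	rcu_read_unlock();
}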
3151 void *netdev_lower_get_next_private(struct net_device *dev,
3152 struct list_head **iter);
3153 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3154 struct list_head **iter);
3155
3156 #define netdev_for_each_lower_private(dev, priv, iter) \
3157 for (iter = (dev)->adj_list.lower.next, \
3158 priv = netdev_lower_get_next_private(dev, &(iter)); \
3159 priv; \
3160 priv = netdev_lower_get_next_private(dev, &(iter)))
3161
3162 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
3163 for (iter = &(dev)->adj_list.lower, \
3164 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
3165 priv; \
3166 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
3167
3168 void *netdev_lower_get_next(struct net_device *dev,
3169 struct list_head **iter);
3170 #define netdev_for_each_lower_dev(dev, ldev, iter) \
3171 for (iter = &(dev)->adj_list.lower, \
3172 ldev = netdev_lower_get_next(dev, &(iter)); \
3173 ldev; \
3174 ldev = netdev_lower_get_next(dev, &(iter)))
3175
3176 void *netdev_adjacent_get_private(struct list_head *adj_list);
3177 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
3178 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
3179 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
3180 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
3181 int netdev_master_upper_dev_link(struct net_device *dev,
3182 struct net_device *upper_dev);
3183 int netdev_master_upper_dev_link_private(struct net_device *dev,
3184 struct net_device *upper_dev,
3185 void *private);
3186 void netdev_upper_dev_unlink(struct net_device *dev,
3187 struct net_device *upper_dev);
3188 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
3189 void *netdev_lower_dev_get_private(struct net_device *dev,
3190 struct net_device *lower_dev);
3191 int dev_get_nest_level(struct net_device *dev,
3192 bool (*type_check)(struct net_device *dev));
3193 int skb_checksum_help(struct sk_buff *skb);
3194 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3195 netdev_features_t features, bool tx_path);
3196 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3197 netdev_features_t features);
3198
3199 static inline
3200 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
3201 {
3202 return __skb_gso_segment(skb, features, true);
3203 }
3204 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
3205
3206 static inline bool can_checksum_protocol(netdev_features_t features,
3207 __be16 protocol)
3208 {
3209 return ((features & NETIF_F_GEN_CSUM) ||
3210 ((features & NETIF_F_V4_CSUM) &&
3211 protocol == htons(ETH_P_IP)) ||
3212 ((features & NETIF_F_V6_CSUM) &&
3213 protocol == htons(ETH_P_IPV6)) ||
3214 ((features & NETIF_F_FCOE_CRC) &&
3215 protocol == htons(ETH_P_FCOE)));
3216 }
3217
3218 #ifdef CONFIG_BUG
3219 void netdev_rx_csum_fault(struct net_device *dev);
3220 #else
3221 static inline void netdev_rx_csum_fault(struct net_device *dev)
3222 {
3223 }
3224 #endif
3225 /* rx skb timestamps */
3226 void net_enable_timestamp(void);
3227 void net_disable_timestamp(void);
3228
3229 #ifdef CONFIG_PROC_FS
3230 int __init dev_proc_init(void);
3231 #else
3232 #define dev_proc_init() 0
3233 #endif
3234
3235 int netdev_class_create_file_ns(struct class_attribute *class_attr,
3236 const void *ns);
3237 void netdev_class_remove_file_ns(struct class_attribute *class_attr,
3238 const void *ns);
3239
3240 static inline int netdev_class_create_file(struct class_attribute *class_attr)
3241 {
3242 return netdev_class_create_file_ns(class_attr, NULL);
3243 }
3244
3245 static inline void netdev_class_remove_file(struct class_attribute *class_attr)
3246 {
3247 netdev_class_remove_file_ns(class_attr, NULL);
3248 }
3249
3250 extern struct kobj_ns_type_operations net_ns_type_operations;
3251
3252 const char *netdev_drivername(const struct net_device *dev);
3253
3254 void linkwatch_run_queue(void);
3255
3256 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
3257 netdev_features_t f2)
3258 {
3259 if (f1 & NETIF_F_GEN_CSUM)
3260 f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
3261 if (f2 & NETIF_F_GEN_CSUM)
3262 f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
3263 f1 &= f2;
3264 if (f1 & NETIF_F_GEN_CSUM)
3265 f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
3266
3267 return f1;
3268 }
3269
3270 static inline netdev_features_t netdev_get_wanted_features(
3271 struct net_device *dev)
3272 {
3273 return (dev->features & ~dev->hw_features) | dev->wanted_features;
3274 }
3275 netdev_features_t netdev_increment_features(netdev_features_t all,
3276 netdev_features_t one, netdev_features_t mask);
3277
3278 /* Allow TSO being used on stacked device :
3279 * Performing the GSO segmentation before last device
3280 * is a performance improvement.
3281 */
3282 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
3283 netdev_features_t mask)
3284 {
3285 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
3286 }
3287
3288 int __netdev_update_features(struct net_device *dev);
3289 void netdev_update_features(struct net_device *dev);
3290 void netdev_change_features(struct net_device *dev);
3291
3292 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
3293 struct net_device *dev);
3294
3295 netdev_features_t netif_skb_features(struct sk_buff *skb);
3296
3297 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
3298 {
3299 netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
3300
3301 /* check flags correspondence */
3302 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
3303 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
3304 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
3305 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
3306 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
3307 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
3308 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
3309 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
3310 BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
3311 BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
3312 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
3313 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
3314 BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT));
3315
3316 return (features & feature) == feature;
3317 }
3318
3319 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
3320 {
3321 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
3322 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
3323 }
3324
3325 static inline bool netif_needs_gso(struct sk_buff *skb,
3326 netdev_features_t features)
3327 {
3328 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
3329 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
3330 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
3331 }
3332
3333 static inline void netif_set_gso_max_size(struct net_device *dev,
3334 unsigned int size)
3335 {
3336 dev->gso_max_size = size;
3337 }
3338
3339 static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
3340 int pulled_hlen, u16 mac_offset,
3341 int mac_len)
3342 {
3343 skb->protocol = protocol;
3344 skb->encapsulation = 1;
3345 skb_push(skb, pulled_hlen);
3346 skb_reset_transport_header(skb);
3347 skb->mac_header = mac_offset;
3348 skb->network_header = skb->mac_header + mac_len;
3349 skb->mac_len = mac_len;
3350 }
3351
3352 static inline bool netif_is_macvlan(struct net_device *dev)
3353 {
3354 return dev->priv_flags & IFF_MACVLAN;
3355 }
3356
3357 static inline bool netif_is_bond_master(struct net_device *dev)
3358 {
3359 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
3360 }
3361
3362 static inline bool netif_is_bond_slave(struct net_device *dev)
3363 {
3364 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
3365 }
3366
3367 static inline bool netif_supports_nofcs(struct net_device *dev)
3368 {
3369 return dev->priv_flags & IFF_SUPP_NOFCS;
3370 }
3371
3372 extern struct pernet_operations __net_initdata loopback_net_ops;
3373
3374 /* Logging, debugging and troubleshooting/diagnostic helpers. */
3375
3376 /* netdev_printk helpers, similar to dev_printk */
3377
3378 static inline const char *netdev_name(const struct net_device *dev)
3379 {
3380 if (dev->reg_state != NETREG_REGISTERED)
3381 return "(unregistered net_device)";
3382 return dev->name;
3383 }
3384
3385 __printf(3, 4)
3386 int netdev_printk(const char *level, const struct net_device *dev,
3387 const char *format, ...);
3388 __printf(2, 3)
3389 int netdev_emerg(const struct net_device *dev, const char *format, ...);
3390 __printf(2, 3)
3391 int netdev_alert(const struct net_device *dev, const char *format, ...);
3392 __printf(2, 3)
3393 int netdev_crit(const struct net_device *dev, const char *format, ...);
3394 __printf(2, 3)
3395 int netdev_err(const struct net_device *dev, const char *format, ...);
3396 __printf(2, 3)
3397 int netdev_warn(const struct net_device *dev, const char *format, ...);
3398 __printf(2, 3)
3399 int netdev_notice(const struct net_device *dev, const char *format, ...);
3400 __printf(2, 3)
3401 int netdev_info(const struct net_device *dev, const char *format, ...);
3402
3403 #define MODULE_ALIAS_NETDEV(device) \
3404 MODULE_ALIAS("netdev-" device)
3405
3406 #if defined(CONFIG_DYNAMIC_DEBUG)
3407 #define netdev_dbg(__dev, format, args...) \
3408 do { \
3409 dynamic_netdev_dbg(__dev, format, ##args); \
3410 } while (0)
3411 #elif defined(DEBUG)
3412 #define netdev_dbg(__dev, format, args...) \
3413 netdev_printk(KERN_DEBUG, __dev, format, ##args)
3414 #else
3415 #define netdev_dbg(__dev, format, args...) \
3416 ({ \
3417 if (0) \
3418 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
3419 0; \
3420 })
3421 #endif
3422
3423 #if defined(VERBOSE_DEBUG)
3424 #define netdev_vdbg netdev_dbg
3425 #else
3426
3427 #define netdev_vdbg(dev, format, args...) \
3428 ({ \
3429 if (0) \
3430 netdev_printk(KERN_DEBUG, dev, format, ##args); \
3431 0; \
3432 })
3433 #endif
3434
3435 /*
3436 * netdev_WARN() acts like dev_printk(), but with the key difference
3437 * of using a WARN/WARN_ON to get the message out, including the
3438 * file/line information and a backtrace.
3439 */
3440 #define netdev_WARN(dev, format, args...) \
3441 WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
3442
3443 /* netif printk helpers, similar to netdev_printk */
3444
3445 #define netif_printk(priv, type, level, dev, fmt, args...) \
3446 do { \
3447 if (netif_msg_##type(priv)) \
3448 netdev_printk(level, (dev), fmt, ##args); \
3449 } while (0)
3450
3451 #define netif_level(level, priv, type, dev, fmt, args...) \
3452 do { \
3453 if (netif_msg_##type(priv)) \
3454 netdev_##level(dev, fmt, ##args); \
3455 } while (0)
3456
3457 #define netif_emerg(priv, type, dev, fmt, args...) \
3458 netif_level(emerg, priv, type, dev, fmt, ##args)
3459 #define netif_alert(priv, type, dev, fmt, args...) \
3460 netif_level(alert, priv, type, dev, fmt, ##args)
3461 #define netif_crit(priv, type, dev, fmt, args...) \
3462 netif_level(crit, priv, type, dev, fmt, ##args)
3463 #define netif_err(priv, type, dev, fmt, args...) \
3464 netif_level(err, priv, type, dev, fmt, ##args)
3465 #define netif_warn(priv, type, dev, fmt, args...) \
3466 netif_level(warn, priv, type, dev, fmt, ##args)
3467 #define netif_notice(priv, type, dev, fmt, args...) \
3468 netif_level(notice, priv, type, dev, fmt, ##args)
3469 #define netif_info(priv, type, dev, fmt, args...) \
3470 netif_level(info, priv, type, dev, fmt, ##args)
3471
3472 #if defined(CONFIG_DYNAMIC_DEBUG)
3473 #define netif_dbg(priv, type, netdev, format, args...) \
3474 do { \
3475 if (netif_msg_##type(priv)) \
3476 dynamic_netdev_dbg(netdev, format, ##args); \
3477 } while (0)
3478 #elif defined(DEBUG)
3479 #define netif_dbg(priv, type, dev, format, args...) \
3480 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
3481 #else
3482 #define netif_dbg(priv, type, dev, format, args...) \
3483 ({ \
3484 if (0) \
3485 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
3486 0; \
3487 })
3488 #endif
3489
3490 #if defined(VERBOSE_DEBUG)
3491 #define netif_vdbg netif_dbg
3492 #else
3493 #define netif_vdbg(priv, type, dev, format, args...) \
3494 ({ \
3495 if (0) \
3496 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
3497 0; \
3498 })
3499 #endif
3500
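/* Illustrative usage sketch, not part of this header: the netif_*() helpers
 * combine the msg_enable test with netdev_printk(). This reuses the
 * hypothetical example_priv structure sketched earlier.
 */
static void example_report_tx_timeout(struct example_priv *priv)
{
	netif_err(priv, tx_err, priv->netdev, "TX timeout\n");
	netif_dbg(priv, drv, priv->netdev, "resetting hardware\n");
}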
3501 /*
3502 * The list of packet types we will receive (as opposed to discard)
3503 * and the routines to invoke.
3504 *
3505 * Why 16. Because with 16 the only overlap we get on a hash of the
3506 * low nibble of the protocol value is RARP/SNAP/X.25.
3507 *
3508 * NOTE: That is no longer true with the addition of VLAN tags. Not
3509 * sure which should go first, but I bet it won't make much
3510 * difference if we are running VLANs. The good news is that
3511 * this protocol won't be in the list unless compiled in, so
3512 * the average user (w/out VLANs) will not be adversely affected.
3513 * --BLG
3514 *
3515 * 0800 IP
3516 * 8100 802.1Q VLAN
3517 * 0001 802.3
3518 * 0002 AX.25
3519 * 0004 802.2
3520 * 8035 RARP
3521 * 0005 SNAP
3522 * 0805 X.25
3523 * 0806 ARP
3524 * 8137 IPX
3525 * 0009 Localtalk
3526 * 86DD IPv6
3527 */
3528 #define PTYPE_HASH_SIZE (16)
3529 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
3530
3531 #endif /* _LINUX_NETDEVICE_H */ 1 #ifndef __LINUX_SPINLOCK_H
2 #define __LINUX_SPINLOCK_H
3
4 /*
5 * include/linux/spinlock.h - generic spinlock/rwlock declarations
6 *
7 * here's the role of the various spinlock/rwlock related include files:
8 *
9 * on SMP builds:
10 *
11 * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
12 * initializers
13 *
14 * linux/spinlock_types.h:
15 * defines the generic type and initializers
16 *
17 * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
18 * implementations, mostly inline assembly code
19 *
20 * (also included on UP-debug builds:)
21 *
22 * linux/spinlock_api_smp.h:
23 * contains the prototypes for the _spin_*() APIs.
24 *
25 * linux/spinlock.h: builds the final spin_*() APIs.
26 *
27 * on UP builds:
28 *
29 * linux/spinlock_type_up.h:
30 * contains the generic, simplified UP spinlock type.
31 * (which is an empty structure on non-debug builds)
32 *
33 * linux/spinlock_types.h:
34 * defines the generic type and initializers
35 *
36 * linux/spinlock_up.h:
37 * contains the arch_spin_*()/etc. version of UP
38 * builds. (which are NOPs on non-debug, non-preempt
39 * builds)
40 *
41 * (included on UP-non-debug builds:)
42 *
43 * linux/spinlock_api_up.h:
44 * builds the _spin_*() APIs.
45 *
46 * linux/spinlock.h: builds the final spin_*() APIs.
47 */
48
49 #include <linux/typecheck.h>
50 #include <linux/preempt.h>
51 #include <linux/linkage.h>
52 #include <linux/compiler.h>
53 #include <linux/irqflags.h>
54 #include <linux/thread_info.h>
55 #include <linux/kernel.h>
56 #include <linux/stringify.h>
57 #include <linux/bottom_half.h>
58 #include <asm/barrier.h>
59
60
61 /*
62 * Must define these before including other files, inline functions need them
63 */
64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
65
66 #define LOCK_SECTION_START(extra) \
67 ".subsection 1\n\t" \
68 extra \
69 ".ifndef " LOCK_SECTION_NAME "\n\t" \
70 LOCK_SECTION_NAME ":\n\t" \
71 ".endif\n"
72
73 #define LOCK_SECTION_END \
74 ".previous\n\t"
75
76 #define __lockfunc __attribute__((section(".spinlock.text")))
77
78 /*
79 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
80 */
81 #include <linux/spinlock_types.h>
82
83 /*
84 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
85 */
86 #ifdef CONFIG_SMP
87 # include <asm/spinlock.h>
88 #else
89 # include <linux/spinlock_up.h>
90 #endif
91
92 #ifdef CONFIG_DEBUG_SPINLOCK
93 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
94 struct lock_class_key *key);
95 # define raw_spin_lock_init(lock) \
96 do { \
97 static struct lock_class_key __key; \
98 \
99 __raw_spin_lock_init((lock), #lock, &__key); \
100 } while (0)
101
102 #else
103 # define raw_spin_lock_init(lock) \
104 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
105 #endif
106
107 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
108
109 #ifdef CONFIG_GENERIC_LOCKBREAK
110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
111 #else
112
113 #ifdef arch_spin_is_contended
114 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
115 #else
116 #define raw_spin_is_contended(lock) (((void)(lock), 0))
117 #endif /*arch_spin_is_contended*/
118 #endif
119
120 /*
121 * Despite its name it doesn't necessarily have to be a full barrier.
122 * It should only guarantee that a STORE before the critical section
123 * can not be reordered with a LOAD inside this section.
124 * spin_lock() is the one-way barrier, this LOAD can not escape out
125 * of the region. So the default implementation simply ensures that
126 * a STORE can not move into the critical section, smp_wmb() should
127 * serialize it with another STORE done by spin_lock().
128 */
129 #ifndef smp_mb__before_spinlock
130 #define smp_mb__before_spinlock() smp_wmb()
131 #endif
132
133 /*
134 * Place this after a lock-acquisition primitive to guarantee that
135 * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
136 * if the UNLOCK and LOCK are executed by the same CPU or if the
137 * UNLOCK and LOCK operate on the same lock variable.
138 */
139 #ifndef smp_mb__after_unlock_lock
140 #define smp_mb__after_unlock_lock() do { } while (0)
141 #endif
142
143 /**
144 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
145 * @lock: the spinlock in question.
146 */
147 #define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
148
149 #ifdef CONFIG_DEBUG_SPINLOCK
150 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
151 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
152 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
153 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
154 #else
155 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
156 {
157 __acquire(lock);
158 arch_spin_lock(&lock->raw_lock);
159 }
160
161 static inline void
162 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
163 {
164 __acquire(lock);
165 arch_spin_lock_flags(&lock->raw_lock, *flags);
166 }
167
168 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
169 {
170 return arch_spin_trylock(&(lock)->raw_lock);
171 }
172
173 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
174 {
175 arch_spin_unlock(&lock->raw_lock);
176 __release(lock);
177 }
178 #endif
179
180 /*
181 * Define the various spin_lock methods. Note we define these
182 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
183 * various methods are defined as nops in the case they are not
184 * required.
185 */
186 #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
187
188 #define raw_spin_lock(lock) _raw_spin_lock(lock)
189
190 #ifdef CONFIG_DEBUG_LOCK_ALLOC
191 # define raw_spin_lock_nested(lock, subclass) \
192 _raw_spin_lock_nested(lock, subclass)
193
194 # define raw_spin_lock_nest_lock(lock, nest_lock) \
195 do { \
196 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
197 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
198 } while (0)
199 #else
200 # define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
201 # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
202 #endif
203
204 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
205
206 #define raw_spin_lock_irqsave(lock, flags) \
207 do { \
208 typecheck(unsigned long, flags); \
209 flags = _raw_spin_lock_irqsave(lock); \
210 } while (0)
211
212 #ifdef CONFIG_DEBUG_LOCK_ALLOC
213 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
214 do { \
215 typecheck(unsigned long, flags); \
216 flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
217 } while (0)
218 #else
219 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
220 do { \
221 typecheck(unsigned long, flags); \
222 flags = _raw_spin_lock_irqsave(lock); \
223 } while (0)
224 #endif
225
226 #else
227
228 #define raw_spin_lock_irqsave(lock, flags) \
229 do { \
230 typecheck(unsigned long, flags); \
231 _raw_spin_lock_irqsave(lock, flags); \
232 } while (0)
233
234 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
235 raw_spin_lock_irqsave(lock, flags)
236
237 #endif
238
239 #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
240 #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
241 #define raw_spin_unlock(lock) _raw_spin_unlock(lock)
242 #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
243
244 #define raw_spin_unlock_irqrestore(lock, flags) \
245 do { \
246 typecheck(unsigned long, flags); \
247 _raw_spin_unlock_irqrestore(lock, flags); \
248 } while (0)
249 #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
250
251 #define raw_spin_trylock_bh(lock) \
252 __cond_lock(lock, _raw_spin_trylock_bh(lock))
253
254 #define raw_spin_trylock_irq(lock) \
255 ({ \
256 local_irq_disable(); \
257 raw_spin_trylock(lock) ? \
258 1 : ({ local_irq_enable(); 0; }); \
259 })
260
261 #define raw_spin_trylock_irqsave(lock, flags) \
262 ({ \
263 local_irq_save(flags); \
264 raw_spin_trylock(lock) ? \
265 1 : ({ local_irq_restore(flags); 0; }); \
266 })
267
268 /**
269 * raw_spin_can_lock - would raw_spin_trylock() succeed?
270 * @lock: the spinlock in question.
271 */
272 #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
273
274 /* Include rwlock functions */
275 #include <linux/rwlock.h>
276
277 /*
278 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
279 */
280 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
281 # include <linux/spinlock_api_smp.h>
282 #else
283 # include <linux/spinlock_api_up.h>
284 #endif
285
286 /*
287 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
288 */
289
290 static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
291 {
292 return &lock->rlock;
293 }
294
295 #define spin_lock_init(_lock) \
296 do { \
297 spinlock_check(_lock); \
298 raw_spin_lock_init(&(_lock)->rlock); \
299 } while (0)
300
301 static inline void spin_lock(spinlock_t *lock)
302 {
303 raw_spin_lock(&lock->rlock);
304 }
305
306 static inline void spin_lock_bh(spinlock_t *lock)
307 {
308 raw_spin_lock_bh(&lock->rlock);
309 }
310
311 static inline int spin_trylock(spinlock_t *lock)
312 {
313 return raw_spin_trylock(&lock->rlock);
314 }
315
316 #define spin_lock_nested(lock, subclass) \
317 do { \
318 raw_spin_lock_nested(spinlock_check(lock), subclass); \
319 } while (0)
320
321 #define spin_lock_nest_lock(lock, nest_lock) \
322 do { \
323 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
324 } while (0)
325
326 static inline void spin_lock_irq(spinlock_t *lock)
327 {
328 raw_spin_lock_irq(&lock->rlock);
329 }
330
331 #define spin_lock_irqsave(lock, flags) \
332 do { \
333 raw_spin_lock_irqsave(spinlock_check(lock), flags); \
334 } while (0)
335
336 #define spin_lock_irqsave_nested(lock, flags, subclass) \
337 do { \
338 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
339 } while (0)
340
341 static inline void spin_unlock(spinlock_t *lock)
342 {
343 raw_spin_unlock(&lock->rlock);
344 }
345
346 static inline void spin_unlock_bh(spinlock_t *lock)
347 {
348 raw_spin_unlock_bh(&lock->rlock);
349 }
350
351 static inline void spin_unlock_irq(spinlock_t *lock)
352 {
353 raw_spin_unlock_irq(&lock->rlock);
354 }
355
356 static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
357 {
358 raw_spin_unlock_irqrestore(&lock->rlock, flags);
359 }
360
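/* Illustrative usage sketch, not part of this header: protecting data that
 * is also touched from an interrupt handler. Every acquired lock must be
 * released exactly once on every path. The structure is hypothetical.
 */
struct example_dev {
	spinlock_t lock;
	unsigned long events;
};

static void example_record_event(struct example_dev *ed)
{
	unsigned long flags;

	spin_lock_irqsave(&ed->lock, flags);	/* also disables local IRQs */
	ed->events++;
	spin_unlock_irqrestore(&ed->lock, flags);
}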
361 static inline int spin_trylock_bh(spinlock_t *lock)
362 {
363 return raw_spin_trylock_bh(&lock->rlock);
364 }
365
366 static inline int spin_trylock_irq(spinlock_t *lock)
367 {
368 return raw_spin_trylock_irq(&lock->rlock);
369 }
370
371 #define spin_trylock_irqsave(lock, flags) \
372 ({ \
373 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
374 })
375
376 static inline void spin_unlock_wait(spinlock_t *lock)
377 {
378 raw_spin_unlock_wait(&lock->rlock);
379 }
380
381 static inline int spin_is_locked(spinlock_t *lock)
382 {
383 return raw_spin_is_locked(&lock->rlock);
384 }
385
386 static inline int spin_is_contended(spinlock_t *lock)
387 {
388 return raw_spin_is_contended(&lock->rlock);
389 }
390
391 static inline int spin_can_lock(spinlock_t *lock)
392 {
393 return raw_spin_can_lock(&lock->rlock);
394 }
395
396 #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
397
398 /*
399 * Pull the atomic_t declaration:
400 * (asm-mips/atomic.h needs above definitions)
401 */
402 #include <linux/atomic.h>
403 /**
404 * atomic_dec_and_lock - lock on reaching reference count zero
405 * @atomic: the atomic counter
406 * @lock: the spinlock in question
407 *
408 * Decrements @atomic by 1. If the result is 0, returns true and locks
409 * @lock. Returns false for all other cases.
410 */
411 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
412 #define atomic_dec_and_lock(atomic, lock) \
413 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
414
415 #endif /* __LINUX_SPINLOCK_H */
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e. there may be no real error at all. Please analyze the given error trace and the related source code to decide whether there is an actual error in your driver.
The Error trace column contains the path along which the given rule is violated. You can expand or collapse entire entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu, and you can expand or collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also bound to the related source code: line numbers may be shown as links on the left, and clicking one opens the corresponding line in the source code.
The Source code column contains the contents of the files related to the error trace: the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files; hovering over a tab shows the full file name, and clicking a tab shows that file's content.
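For orientation, here is a minimal sketch of the kind of locking defect that spinlock rules of this family typically report, assuming rule 39_7a concerns correct lock/unlock pairing (an assumption suggested by the spinlock model appearing in the trace, not a statement about the actual defect in ali-ircc): an early-return path that leaves a spinlock held. All names below are hypothetical and not taken from the driver.

struct example_irda_priv {		/* hypothetical, not from ali-ircc */
	spinlock_t lock;
};

static int example_hard_xmit(struct example_irda_priv *priv, int len)
{
	spin_lock(&priv->lock);
	if (len > 2048)
		return -EINVAL;		/* BUG: returns with the lock held */
	/* ... queue the frame to the hardware ... */
	spin_unlock(&priv->lock);
	return 0;
}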
Kernel | Module | Rule | Verifier | Verdict | Status | Creation time | Problem description |
linux-3.16-rc1.tar.xz | drivers/net/irda/ali-ircc.ko | 39_7a | CPAchecker | Bug | Fixed | 2014-12-12 13:13:53 | L0010 |
Comment
Reported again: 12 Sep 2015
[Back to top]