Bug
[В начало]
Ошибка # 155
Показать/спрятать трассу ошибок Error trace
{ 19 typedef signed char __s8; 20 typedef unsigned char __u8; 22 typedef short __s16; 23 typedef unsigned short __u16; 25 typedef int __s32; 26 typedef unsigned int __u32; 29 typedef long long __s64; 30 typedef unsigned long long __u64; 15 typedef signed char s8; 16 typedef unsigned char u8; 19 typedef unsigned short u16; 21 typedef int s32; 22 typedef unsigned int u32; 24 typedef long long s64; 25 typedef unsigned long long u64; 14 typedef long __kernel_long_t; 15 typedef unsigned long __kernel_ulong_t; 27 typedef int __kernel_pid_t; 48 typedef unsigned int __kernel_uid32_t; 49 typedef unsigned int __kernel_gid32_t; 71 typedef __kernel_ulong_t __kernel_size_t; 72 typedef __kernel_long_t __kernel_ssize_t; 87 typedef long long __kernel_loff_t; 88 typedef __kernel_long_t __kernel_time_t; 89 typedef __kernel_long_t __kernel_clock_t; 90 typedef int __kernel_timer_t; 91 typedef int __kernel_clockid_t; 32 typedef __u16 __le16; 33 typedef __u16 __be16; 35 typedef __u32 __be32; 37 typedef __u64 __be64; 40 typedef __u32 __wsum; 257 struct kernel_symbol { unsigned long value; const char *name; } ; 33 struct module ; 12 typedef __u32 __kernel_dev_t; 15 typedef __kernel_dev_t dev_t; 18 typedef unsigned short umode_t; 21 typedef __kernel_pid_t pid_t; 26 typedef __kernel_clockid_t clockid_t; 29 typedef _Bool bool; 31 typedef __kernel_uid32_t uid_t; 32 typedef __kernel_gid32_t gid_t; 45 typedef __kernel_loff_t loff_t; 54 typedef __kernel_size_t size_t; 59 typedef __kernel_ssize_t ssize_t; 69 typedef __kernel_time_t time_t; 102 typedef __s32 int32_t; 106 typedef __u8 uint8_t; 108 typedef __u32 uint32_t; 111 typedef __u64 uint64_t; 133 typedef unsigned long sector_t; 134 typedef unsigned long blkcnt_t; 152 typedef u64 dma_addr_t; 157 typedef unsigned int gfp_t; 158 typedef unsigned int fmode_t; 176 struct __anonstruct_atomic_t_6 { int counter; } ; 176 typedef struct __anonstruct_atomic_t_6 atomic_t; 181 struct __anonstruct_atomic64_t_7 { long counter; } ; 181 typedef struct 
__anonstruct_atomic64_t_7 atomic64_t; 182 struct list_head { struct list_head *next; struct list_head *prev; } ; 187 struct hlist_node ; 187 struct hlist_head { struct hlist_node *first; } ; 191 struct hlist_node { struct hlist_node *next; struct hlist_node **pprev; } ; 202 struct callback_head { struct callback_head *next; void (*func)(struct callback_head *); } ; 125 typedef void (*ctor_fn_t)(); 67 struct ctl_table ; 58 struct device ; 64 struct net_device ; 467 struct file_operations ; 479 struct completion ; 480 struct pt_regs ; 27 union __anonunion___u_9 { struct list_head *__val; char __c[1U]; } ; 189 union __anonunion___u_13 { struct list_head *__val; char __c[1U]; } ; 556 struct bug_entry { int bug_addr_disp; int file_disp; unsigned short line; unsigned short flags; } ; 111 struct timespec ; 112 struct compat_timespec ; 113 struct __anonstruct_futex_25 { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 *uaddr2; } ; 113 struct __anonstruct_nanosleep_26 { clockid_t clockid; struct timespec *rmtp; struct compat_timespec *compat_rmtp; u64 expires; } ; 113 struct pollfd ; 113 struct __anonstruct_poll_27 { struct pollfd *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } ; 113 union __anonunion____missing_field_name_24 { struct __anonstruct_futex_25 futex; struct __anonstruct_nanosleep_26 nanosleep; struct __anonstruct_poll_27 poll; } ; 113 struct restart_block { long int (*fn)(struct restart_block *); union __anonunion____missing_field_name_24 __annonCompField4; } ; 39 struct page ; 14 struct __anonstruct_pfn_t_28 { u64 val; } ; 14 typedef struct __anonstruct_pfn_t_28 pfn_t; 26 struct task_struct ; 27 struct mm_struct ; 288 struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long bp; unsigned long bx; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long ax; unsigned long cx; unsigned long dx; unsigned long si; unsigned long di; unsigned long 
orig_ax; unsigned long ip; unsigned long cs; unsigned long flags; unsigned long sp; unsigned long ss; } ; 66 struct __anonstruct____missing_field_name_30 { unsigned int a; unsigned int b; } ; 66 struct __anonstruct____missing_field_name_31 { u16 limit0; u16 base0; unsigned char base1; unsigned char type; unsigned char s; unsigned char dpl; unsigned char p; unsigned char limit; unsigned char avl; unsigned char l; unsigned char d; unsigned char g; unsigned char base2; } ; 66 union __anonunion____missing_field_name_29 { struct __anonstruct____missing_field_name_30 __annonCompField5; struct __anonstruct____missing_field_name_31 __annonCompField6; } ; 66 struct desc_struct { union __anonunion____missing_field_name_29 __annonCompField7; } ; 13 typedef unsigned long pteval_t; 14 typedef unsigned long pmdval_t; 16 typedef unsigned long pgdval_t; 17 typedef unsigned long pgprotval_t; 19 struct __anonstruct_pte_t_32 { pteval_t pte; } ; 19 typedef struct __anonstruct_pte_t_32 pte_t; 21 struct pgprot { pgprotval_t pgprot; } ; 256 typedef struct pgprot pgprot_t; 258 struct __anonstruct_pgd_t_33 { pgdval_t pgd; } ; 258 typedef struct __anonstruct_pgd_t_33 pgd_t; 297 struct __anonstruct_pmd_t_35 { pmdval_t pmd; } ; 297 typedef struct __anonstruct_pmd_t_35 pmd_t; 423 typedef struct page *pgtable_t; 434 struct file ; 447 struct seq_file ; 483 struct thread_struct ; 485 struct cpumask ; 20 struct qspinlock { atomic_t val; } ; 33 typedef struct qspinlock arch_spinlock_t; 34 struct qrwlock { atomic_t cnts; arch_spinlock_t wait_lock; } ; 14 typedef struct qrwlock arch_rwlock_t; 247 struct math_emu_info { long ___orig_eip; struct pt_regs *regs; } ; 341 struct cpumask { unsigned long bits[128U]; } ; 15 typedef struct cpumask cpumask_t; 654 typedef struct cpumask *cpumask_var_t; 38 union __anonunion___u_44 { int __val; char __c[1U]; } ; 23 typedef atomic64_t atomic_long_t; 81 struct static_key { atomic_t enabled; } ; 22 struct tracepoint_func { void *func; void *data; int prio; } ; 28 
struct tracepoint { const char *name; struct static_key key; void (*regfunc)(); void (*unregfunc)(); struct tracepoint_func *funcs; } ; 254 struct fregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u32 status; } ; 26 struct __anonstruct____missing_field_name_59 { u64 rip; u64 rdp; } ; 26 struct __anonstruct____missing_field_name_60 { u32 fip; u32 fcs; u32 foo; u32 fos; } ; 26 union __anonunion____missing_field_name_58 { struct __anonstruct____missing_field_name_59 __annonCompField13; struct __anonstruct____missing_field_name_60 __annonCompField14; } ; 26 union __anonunion____missing_field_name_61 { u32 padding1[12U]; u32 sw_reserved[12U]; } ; 26 struct fxregs_state { u16 cwd; u16 swd; u16 twd; u16 fop; union __anonunion____missing_field_name_58 __annonCompField15; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32U]; u32 xmm_space[64U]; u32 padding[12U]; union __anonunion____missing_field_name_61 __annonCompField16; } ; 66 struct swregs_state { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20U]; u8 ftop; u8 changed; u8 lookahead; u8 no_update; u8 rm; u8 alimit; struct math_emu_info *info; u32 entry_eip; } ; 227 struct xstate_header { u64 xfeatures; u64 xcomp_bv; u64 reserved[6U]; } ; 233 struct xregs_state { struct fxregs_state i387; struct xstate_header header; u8 extended_state_area[0U]; } ; 254 union fpregs_state { struct fregs_state fsave; struct fxregs_state fxsave; struct swregs_state soft; struct xregs_state xsave; u8 __padding[4096U]; } ; 271 struct fpu { unsigned int last_cpu; unsigned char fpstate_active; unsigned char fpregs_active; unsigned char counter; union fpregs_state state; } ; 169 struct seq_operations ; 372 struct perf_event ; 377 struct __anonstruct_mm_segment_t_73 { unsigned long seg; } ; 377 typedef struct __anonstruct_mm_segment_t_73 mm_segment_t; 378 struct thread_struct { struct desc_struct tls_array[3U]; unsigned long sp0; unsigned long sp; unsigned short es; unsigned 
short ds; unsigned short fsindex; unsigned short gsindex; unsigned long fsbase; unsigned long gsbase; struct perf_event *ptrace_bps[4U]; unsigned long debugreg6; unsigned long ptrace_dr7; unsigned long cr2; unsigned long trap_nr; unsigned long error_code; unsigned long *io_bitmap_ptr; unsigned long iopl; unsigned int io_bitmap_max; mm_segment_t addr_limit; unsigned char sig_on_uaccess_err; unsigned char uaccess_err; struct fpu fpu; } ; 33 struct lockdep_map ; 55 struct stack_trace { unsigned int nr_entries; unsigned int max_entries; unsigned long *entries; int skip; } ; 28 struct lockdep_subclass_key { char __one_byte; } ; 53 struct lock_class_key { struct lockdep_subclass_key subkeys[8U]; } ; 59 struct lock_class { struct hlist_node hash_entry; struct list_head lock_entry; struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; unsigned long usage_mask; struct stack_trace usage_traces[13U]; struct list_head locks_after; struct list_head locks_before; unsigned int version; unsigned long ops; const char *name; int name_version; unsigned long contention_point[4U]; unsigned long contending_point[4U]; } ; 144 struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[2U]; const char *name; int cpu; unsigned long ip; } ; 207 struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; u64 waittime_stamp; u64 holdtime_stamp; unsigned short class_idx; unsigned char irq_context; unsigned char trylock; unsigned char read; unsigned char check; unsigned char hardirqs_off; unsigned short references; unsigned int pin_count; } ; 572 struct raw_spinlock { arch_spinlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 32 typedef struct raw_spinlock raw_spinlock_t; 33 struct __anonstruct____missing_field_name_75 { u8 __padding[24U]; struct lockdep_map dep_map; } ; 33 union __anonunion____missing_field_name_74 { struct 
raw_spinlock rlock; struct __anonstruct____missing_field_name_75 __annonCompField19; } ; 33 struct spinlock { union __anonunion____missing_field_name_74 __annonCompField20; } ; 76 typedef struct spinlock spinlock_t; 23 struct __anonstruct_rwlock_t_76 { arch_rwlock_t raw_lock; unsigned int magic; unsigned int owner_cpu; void *owner; struct lockdep_map dep_map; } ; 23 typedef struct __anonstruct_rwlock_t_76 rwlock_t; 416 struct seqcount { unsigned int sequence; struct lockdep_map dep_map; } ; 52 typedef struct seqcount seqcount_t; 407 struct __anonstruct_seqlock_t_91 { struct seqcount seqcount; spinlock_t lock; } ; 407 typedef struct __anonstruct_seqlock_t_91 seqlock_t; 601 struct timespec { __kernel_time_t tv_sec; long tv_nsec; } ; 7 typedef __s64 time64_t; 83 struct user_namespace ; 22 struct __anonstruct_kuid_t_92 { uid_t val; } ; 22 typedef struct __anonstruct_kuid_t_92 kuid_t; 27 struct __anonstruct_kgid_t_93 { gid_t val; } ; 27 typedef struct __anonstruct_kgid_t_93 kgid_t; 139 struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; kuid_t uid; kgid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; } ; 36 struct vm_area_struct ; 38 struct __wait_queue_head { spinlock_t lock; struct list_head task_list; } ; 43 typedef struct __wait_queue_head wait_queue_head_t; 97 struct __anonstruct_nodemask_t_94 { unsigned long bits[16U]; } ; 97 typedef struct __anonstruct_nodemask_t_94 nodemask_t; 80 struct free_area { struct list_head free_list[6U]; unsigned long nr_free; } ; 92 struct pglist_data ; 93 struct zone_padding { char x[0U]; } ; 208 struct zone_reclaim_stat { unsigned long recent_rotated[2U]; unsigned long recent_scanned[2U]; } ; 221 struct lruvec { struct list_head lists[5U]; struct zone_reclaim_stat reclaim_stat; atomic_long_t inactive_age; struct pglist_data *pgdat; } ; 247 typedef unsigned int isolate_mode_t; 255 struct per_cpu_pages { int count; 
int high; int batch; struct list_head lists[3U]; } ; 268 struct per_cpu_pageset { struct per_cpu_pages pcp; s8 expire; s8 stat_threshold; s8 vm_stat_diff[21U]; } ; 278 struct per_cpu_nodestat { s8 stat_threshold; s8 vm_node_stat_diff[26U]; } ; 284 enum zone_type { ZONE_DMA = 0, ZONE_DMA32 = 1, ZONE_NORMAL = 2, ZONE_MOVABLE = 3, __MAX_NR_ZONES = 4 } ; 292 struct zone { unsigned long watermark[3U]; unsigned long nr_reserved_highatomic; long lowmem_reserve[4U]; int node; struct pglist_data *zone_pgdat; struct per_cpu_pageset *pageset; unsigned long zone_start_pfn; unsigned long managed_pages; unsigned long spanned_pages; unsigned long present_pages; const char *name; unsigned long nr_isolate_pageblock; wait_queue_head_t *wait_table; unsigned long wait_table_hash_nr_entries; unsigned long wait_table_bits; struct zone_padding _pad1_; struct free_area free_area[11U]; unsigned long flags; spinlock_t lock; struct zone_padding _pad2_; unsigned long percpu_drift_mark; unsigned long compact_cached_free_pfn; unsigned long compact_cached_migrate_pfn[2U]; unsigned int compact_considered; unsigned int compact_defer_shift; int compact_order_failed; bool compact_blockskip_flush; bool contiguous; struct zone_padding _pad3_; atomic_long_t vm_stat[21U]; } ; 560 struct zoneref { struct zone *zone; int zone_idx; } ; 585 struct zonelist { struct zoneref _zonerefs[4097U]; } ; 608 struct pglist_data { struct zone node_zones[4U]; struct zonelist node_zonelists[2U]; int nr_zones; unsigned long node_start_pfn; unsigned long node_present_pages; unsigned long node_spanned_pages; int node_id; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; int kswapd_order; enum zone_type kswapd_classzone_idx; int kcompactd_max_order; enum zone_type kcompactd_classzone_idx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; spinlock_t numabalancing_migrate_lock; unsigned long numabalancing_migrate_next_window; unsigned long 
numabalancing_migrate_nr_pages; unsigned long totalreserve_pages; unsigned long min_unmapped_pages; unsigned long min_slab_pages; struct zone_padding _pad1_; spinlock_t lru_lock; spinlock_t split_queue_lock; struct list_head split_queue; unsigned long split_queue_len; struct lruvec lruvec; unsigned int inactive_ratio; unsigned long flags; struct zone_padding _pad2_; struct per_cpu_nodestat *per_cpu_nodestats; atomic_long_t vm_stat[26U]; } ; 13 struct optimistic_spin_queue { atomic_t tail; } ; 39 struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct task_struct *owner; void *magic; struct lockdep_map dep_map; } ; 67 struct mutex_waiter { struct list_head list; struct task_struct *task; void *magic; } ; 177 struct rw_semaphore ; 178 struct rw_semaphore { atomic_long_t count; struct list_head wait_list; raw_spinlock_t wait_lock; struct optimistic_spin_queue osq; struct task_struct *owner; struct lockdep_map dep_map; } ; 178 struct completion { unsigned int done; wait_queue_head_t wait; } ; 446 union ktime { s64 tv64; } ; 41 typedef union ktime ktime_t; 1144 struct timer_list { struct hlist_node entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; u32 flags; int start_pid; void *start_site; char start_comm[16U]; struct lockdep_map lockdep_map; } ; 254 struct hrtimer ; 255 enum hrtimer_restart ; 256 struct rb_node { unsigned long __rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } ; 41 struct rb_root { struct rb_node *rb_node; } ; 835 struct nsproxy ; 836 struct ctl_table_root ; 837 struct ctl_table_header ; 838 struct ctl_dir ; 38 typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *); 58 struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; } ; 97 struct ctl_table { const char *procname; void *data; int maxlen; umode_t mode; struct ctl_table *child; proc_handler *proc_handler; struct ctl_table_poll *poll; void *extra1; void *extra2; } ; 118 struct 
ctl_node { struct rb_node node; struct ctl_table_header *header; } ; 123 struct __anonstruct____missing_field_name_100 { struct ctl_table *ctl_table; int used; int count; int nreg; } ; 123 union __anonunion____missing_field_name_99 { struct __anonstruct____missing_field_name_100 __annonCompField21; struct callback_head rcu; } ; 123 struct ctl_table_set ; 123 struct ctl_table_header { union __anonunion____missing_field_name_99 __annonCompField22; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; } ; 144 struct ctl_dir { struct ctl_table_header header; struct rb_root root; } ; 150 struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; } ; 155 struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set * (*lookup)(struct ctl_table_root *, struct nsproxy *); int (*permissions)(struct ctl_table_header *, struct ctl_table *); } ; 278 struct workqueue_struct ; 279 struct work_struct ; 54 struct work_struct { atomic_long_t data; struct list_head entry; void (*func)(struct work_struct *); struct lockdep_map lockdep_map; } ; 107 struct delayed_work { struct work_struct work; struct timer_list timer; struct workqueue_struct *wq; int cpu; } ; 268 struct notifier_block ; 53 struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; } ; 58 struct pm_message { int event; } ; 64 typedef struct pm_message pm_message_t; 65 struct dev_pm_ops { int (*prepare)(struct device *); void (*complete)(struct device *); int (*suspend)(struct device *); int (*resume)(struct device *); int (*freeze)(struct device *); int (*thaw)(struct device *); int (*poweroff)(struct device *); int (*restore)(struct device *); int (*suspend_late)(struct device *); int (*resume_early)(struct device *); int (*freeze_late)(struct device *); int (*thaw_early)(struct device 
*); int (*poweroff_late)(struct device *); int (*restore_early)(struct device *); int (*suspend_noirq)(struct device *); int (*resume_noirq)(struct device *); int (*freeze_noirq)(struct device *); int (*thaw_noirq)(struct device *); int (*poweroff_noirq)(struct device *); int (*restore_noirq)(struct device *); int (*runtime_suspend)(struct device *); int (*runtime_resume)(struct device *); int (*runtime_idle)(struct device *); } ; 320 enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; 327 enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; 335 struct wakeup_source ; 336 struct wake_irq ; 337 struct pm_domain_data ; 338 struct pm_subsys_data { spinlock_t lock; unsigned int refcount; struct list_head clock_list; struct pm_domain_data *domain_data; } ; 556 struct dev_pm_qos ; 556 struct dev_pm_info { pm_message_t power_state; unsigned char can_wakeup; unsigned char async_suspend; bool is_prepared; bool is_suspended; bool is_noirq_suspended; bool is_late_suspended; bool early_init; bool direct_complete; spinlock_t lock; struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path; bool syscore; bool no_pm_callbacks; struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned char disable_depth; unsigned char idle_notification; unsigned char request_pending; unsigned char deferred_resume; unsigned char run_wake; unsigned char runtime_auto; bool ignore_children; unsigned char no_callbacks; unsigned char irq_safe; unsigned char use_autosuspend; unsigned char timer_autosuspends; unsigned char memalloc_noio; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; unsigned long last_busy; unsigned long active_jiffies; unsigned long 
suspended_jiffies; unsigned long accounting_timestamp; struct pm_subsys_data *subsys_data; void (*set_latency_tolerance)(struct device *, s32 ); struct dev_pm_qos *qos; } ; 616 struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *, bool ); int (*activate)(struct device *); void (*sync)(struct device *); void (*dismiss)(struct device *); } ; 26 struct ldt_struct ; 26 struct vdso_image ; 26 struct __anonstruct_mm_context_t_165 { struct ldt_struct *ldt; unsigned short ia32_compat; struct mutex lock; void *vdso; const struct vdso_image *vdso_image; atomic_t perf_rdpmc_allowed; } ; 26 typedef struct __anonstruct_mm_context_t_165 mm_context_t; 22 struct bio_vec ; 1276 struct llist_node ; 64 struct llist_node { struct llist_node *next; } ; 17 struct call_single_data { struct llist_node llist; void (*func)(void *); void *info; unsigned int flags; } ; 37 struct cred ; 19 struct inode ; 58 struct arch_uprobe_task { unsigned long saved_scratch_register; unsigned int saved_trap_nr; unsigned int saved_tf; } ; 66 enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; 73 struct __anonstruct____missing_field_name_211 { struct arch_uprobe_task autask; unsigned long vaddr; } ; 73 struct __anonstruct____missing_field_name_212 { struct callback_head dup_xol_work; unsigned long dup_xol_addr; } ; 73 union __anonunion____missing_field_name_210 { struct __anonstruct____missing_field_name_211 __annonCompField35; struct __anonstruct____missing_field_name_212 __annonCompField36; } ; 73 struct uprobe ; 73 struct return_instance ; 73 struct uprobe_task { enum uprobe_task_state state; union __anonunion____missing_field_name_210 __annonCompField37; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; } ; 94 struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; unsigned long orig_ret_vaddr; bool chained; struct 
return_instance *next; } ; 110 struct xol_area ; 111 struct uprobes_state { struct xol_area *xol_area; } ; 150 struct address_space ; 151 struct mem_cgroup ; 152 union __anonunion____missing_field_name_213 { struct address_space *mapping; void *s_mem; atomic_t compound_mapcount; } ; 152 union __anonunion____missing_field_name_214 { unsigned long index; void *freelist; } ; 152 struct __anonstruct____missing_field_name_218 { unsigned short inuse; unsigned short objects; unsigned char frozen; } ; 152 union __anonunion____missing_field_name_217 { atomic_t _mapcount; unsigned int active; struct __anonstruct____missing_field_name_218 __annonCompField40; int units; } ; 152 struct __anonstruct____missing_field_name_216 { union __anonunion____missing_field_name_217 __annonCompField41; atomic_t _refcount; } ; 152 union __anonunion____missing_field_name_215 { unsigned long counters; struct __anonstruct____missing_field_name_216 __annonCompField42; } ; 152 struct dev_pagemap ; 152 struct __anonstruct____missing_field_name_220 { struct page *next; int pages; int pobjects; } ; 152 struct __anonstruct____missing_field_name_221 { unsigned long compound_head; unsigned int compound_dtor; unsigned int compound_order; } ; 152 struct __anonstruct____missing_field_name_222 { unsigned long __pad; pgtable_t pmd_huge_pte; } ; 152 union __anonunion____missing_field_name_219 { struct list_head lru; struct dev_pagemap *pgmap; struct __anonstruct____missing_field_name_220 __annonCompField44; struct callback_head callback_head; struct __anonstruct____missing_field_name_221 __annonCompField45; struct __anonstruct____missing_field_name_222 __annonCompField46; } ; 152 struct kmem_cache ; 152 union __anonunion____missing_field_name_223 { unsigned long private; spinlock_t *ptl; struct kmem_cache *slab_cache; } ; 152 struct page { unsigned long flags; union __anonunion____missing_field_name_213 __annonCompField38; union __anonunion____missing_field_name_214 __annonCompField39; union 
__anonunion____missing_field_name_215 __annonCompField43; union __anonunion____missing_field_name_219 __annonCompField47; union __anonunion____missing_field_name_223 __annonCompField48; struct mem_cgroup *mem_cgroup; } ; 197 struct page_frag { struct page *page; __u32 offset; __u32 size; } ; 282 struct userfaultfd_ctx ; 282 struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; } ; 289 struct __anonstruct_shared_224 { struct rb_node rb; unsigned long rb_subtree_last; } ; 289 struct anon_vma ; 289 struct vm_operations_struct ; 289 struct mempolicy ; 289 struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; struct vm_area_struct *vm_prev; struct rb_node vm_rb; unsigned long rb_subtree_gap; struct mm_struct *vm_mm; pgprot_t vm_page_prot; unsigned long vm_flags; struct __anonstruct_shared_224 shared; struct list_head anon_vma_chain; struct anon_vma *anon_vma; const struct vm_operations_struct *vm_ops; unsigned long vm_pgoff; struct file *vm_file; void *vm_private_data; struct mempolicy *vm_policy; struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } ; 362 struct core_thread { struct task_struct *task; struct core_thread *next; } ; 367 struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; } ; 381 struct task_rss_stat { int events; int count[4U]; } ; 389 struct mm_rss_stat { atomic_long_t count[4U]; } ; 394 struct kioctx_table ; 395 struct linux_binfmt ; 395 struct mmu_notifier_mm ; 395 struct mm_struct { struct vm_area_struct *mmap; struct rb_root mm_rb; u32 vmacache_seqnum; unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long mmap_base; unsigned long mmap_legacy_base; unsigned long task_size; unsigned long highest_vm_end; pgd_t *pgd; atomic_t mm_users; atomic_t mm_count; atomic_long_t nr_ptes; atomic_long_t nr_pmds; int map_count; spinlock_t page_table_lock; struct rw_semaphore mmap_sem; struct list_head 
mmlist; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm; unsigned long locked_vm; unsigned long pinned_vm; unsigned long data_vm; unsigned long exec_vm; unsigned long stack_vm; unsigned long def_flags; unsigned long start_code; unsigned long end_code; unsigned long start_data; unsigned long end_data; unsigned long start_brk; unsigned long brk; unsigned long start_stack; unsigned long arg_start; unsigned long arg_end; unsigned long env_start; unsigned long env_end; unsigned long saved_auxv[46U]; struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; mm_context_t context; unsigned long flags; struct core_state *core_state; spinlock_t ioctx_lock; struct kioctx_table *ioctx_table; struct task_struct *owner; struct file *exe_file; struct mmu_notifier_mm *mmu_notifier_mm; struct cpumask cpumask_allocation; unsigned long numa_next_scan; unsigned long numa_scan_offset; int numa_scan_seq; bool tlb_flush_pending; struct uprobes_state uprobes_state; void *bd_addr; atomic_long_t hugetlb_usage; struct work_struct async_put_work; } ; 565 struct vm_fault ; 619 struct vdso_image { void *data; unsigned long size; unsigned long alt; unsigned long alt_len; long sym_vvar_start; long sym_vvar_page; long sym_hpet_page; long sym_pvclock_page; long sym_VDSO32_NOTE_MASK; long sym___kernel_sigreturn; long sym___kernel_rt_sigreturn; long sym___kernel_vsyscall; long sym_int80_landing_pad; } ; 15 typedef __u64 Elf64_Addr; 16 typedef __u16 Elf64_Half; 18 typedef __u64 Elf64_Off; 20 typedef __u32 Elf64_Word; 21 typedef __u64 Elf64_Xword; 190 struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } ; 198 typedef struct elf64_sym Elf64_Sym; 219 struct elf64_hdr { unsigned char e_ident[16U]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half 
e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } ; 235 typedef struct elf64_hdr Elf64_Ehdr; 314 struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } ; 326 typedef struct elf64_shdr Elf64_Shdr; 53 union __anonunion____missing_field_name_229 { unsigned long bitmap[4U]; struct callback_head callback_head; } ; 53 struct idr_layer { int prefix; int layer; struct idr_layer *ary[256U]; int count; union __anonunion____missing_field_name_229 __annonCompField49; } ; 41 struct idr { struct idr_layer *hint; struct idr_layer *top; int layers; int cur; spinlock_t lock; int id_free_cnt; struct idr_layer *id_free; } ; 124 struct ida_bitmap { long nr_busy; unsigned long bitmap[15U]; } ; 167 struct ida { struct idr idr; struct ida_bitmap *free_bitmap; } ; 199 struct dentry ; 200 struct iattr ; 201 struct super_block ; 202 struct file_system_type ; 203 struct kernfs_open_node ; 204 struct kernfs_iattrs ; 227 struct kernfs_root ; 227 struct kernfs_elem_dir { unsigned long subdirs; struct rb_root children; struct kernfs_root *root; } ; 85 struct kernfs_node ; 85 struct kernfs_elem_symlink { struct kernfs_node *target_kn; } ; 89 struct kernfs_ops ; 89 struct kernfs_elem_attr { const struct kernfs_ops *ops; struct kernfs_open_node *open; loff_t size; struct kernfs_node *notify_next; } ; 96 union __anonunion____missing_field_name_234 { struct kernfs_elem_dir dir; struct kernfs_elem_symlink symlink; struct kernfs_elem_attr attr; } ; 96 struct kernfs_node { atomic_t count; atomic_t active; struct lockdep_map dep_map; struct kernfs_node *parent; const char *name; struct rb_node rb; const void *ns; unsigned int hash; union __anonunion____missing_field_name_234 __annonCompField50; void *priv; unsigned short flags; umode_t mode; unsigned int 
ino; struct kernfs_iattrs *iattr; } ; 138 struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root *, int *, char *); int (*show_options)(struct seq_file *, struct kernfs_root *); int (*mkdir)(struct kernfs_node *, const char *, umode_t ); int (*rmdir)(struct kernfs_node *); int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); } ; 157 struct kernfs_root { struct kernfs_node *kn; unsigned int flags; struct ida ino_ida; struct kernfs_syscall_ops *syscall_ops; struct list_head supers; wait_queue_head_t deactivate_waitq; } ; 173 struct kernfs_open_file { struct kernfs_node *kn; struct file *file; void *priv; struct mutex mutex; struct mutex prealloc_mutex; int event; struct list_head list; char *prealloc_buf; size_t atomic_write_len; bool mmapped; const struct vm_operations_struct *vm_ops; } ; 191 struct kernfs_ops { int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); ssize_t (*read)(struct kernfs_open_file *, char *, size_t , loff_t ); size_t atomic_write_len; bool prealloc; ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); struct lock_class_key lockdep_key; } ; 499 struct sock ; 500 struct kobject ; 501 enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; 507 struct kobj_ns_type_operations { enum kobj_ns_type type; bool (*current_may_mount)(); void * (*grab_current_ns)(); const void * (*netlink_ns)(struct sock *); const void * (*initial_ns)(); void (*drop_ns)(void *); } ; 59 struct bin_attribute ; 60 struct attribute { const char *name; umode_t mode; bool ignore_lockdep; struct lock_class_key *key; struct lock_class_key skey; } ; 37 struct attribute_group { const char *name; umode_t 
(*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; } ; 92 struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t , size_t ); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); } ; 165 struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t ); } ; 530 struct kref { atomic_t refcount; } ; 52 struct kset ; 52 struct kobj_type ; 52 struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; struct kref kref; struct delayed_work release; unsigned char state_initialized; unsigned char state_in_sysfs; unsigned char state_add_uevent_sent; unsigned char state_remove_uevent_sent; unsigned char uevent_suppress; } ; 115 struct kobj_type { void (*release)(struct kobject *); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); const void * (*namespace)(struct kobject *); } ; 123 struct kobj_uevent_env { char *argv[3U]; char *envp[32U]; int envp_idx; char buf[2048U]; int buflen; } ; 131 struct kset_uevent_ops { const int (*filter)(struct kset *, struct kobject *); const const char * (*name)(struct kset *, struct kobject *); const int (*uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); } ; 148 struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } ; 223 struct kernel_param ; 228 struct kernel_param_ops { unsigned int flags; int 
(*set)(const char *, const struct kernel_param *); int (*get)(char *, const struct kernel_param *); void (*free)(void *); } ; 62 struct kparam_string ; 62 struct kparam_array ; 62 union __anonunion____missing_field_name_237 { void *arg; const struct kparam_string *str; const struct kparam_array *arr; } ; 62 struct kernel_param { const char *name; struct module *mod; const struct kernel_param_ops *ops; const u16 perm; s8 level; u8 flags; union __anonunion____missing_field_name_237 __annonCompField51; } ; 83 struct kparam_string { unsigned int maxlen; char *string; } ; 89 struct kparam_array { unsigned int max; unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; void *elem; } ; 470 struct exception_table_entry ; 24 struct latch_tree_node { struct rb_node node[2U]; } ; 211 struct mod_arch_specific { } ; 39 struct module_param_attrs ; 39 struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } ; 50 struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t ); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); } ; 277 enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; 284 struct mod_tree_node { struct module *mod; struct latch_tree_node node; } ; 291 struct module_layout { void *base; unsigned int size; unsigned int text_size; unsigned int ro_size; unsigned int ro_after_init_size; struct mod_tree_node mtn; } ; 307 struct mod_kallsyms { Elf64_Sym *symtab; unsigned int num_symtab; char *strtab; } ; 321 struct klp_modinfo { Elf64_Ehdr hdr; Elf64_Shdr *sechdrs; char *secstrings; unsigned int symndx; } ; 329 struct module_sect_attrs ; 329 struct module_notes_attrs ; 
329 struct trace_event_call ; 329 struct trace_enum_map ; 329 struct module { enum module_state state; struct list_head list; char name[56U]; struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; const struct kernel_symbol *syms; const unsigned long *crcs; unsigned int num_syms; struct mutex param_lock; struct kernel_param *kp; unsigned int num_kp; unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const unsigned long *gpl_crcs; const struct kernel_symbol *unused_syms; const unsigned long *unused_crcs; unsigned int num_unused_syms; unsigned int num_unused_gpl_syms; const struct kernel_symbol *unused_gpl_syms; const unsigned long *unused_gpl_crcs; bool sig_ok; bool async_probe_requested; const struct kernel_symbol *gpl_future_syms; const unsigned long *gpl_future_crcs; unsigned int num_gpl_future_syms; unsigned int num_exentries; struct exception_table_entry *extable; int (*init)(); struct module_layout core_layout; struct module_layout init_layout; struct mod_arch_specific arch; unsigned int taints; unsigned int num_bugs; struct list_head bug_list; struct bug_entry *bug_table; struct mod_kallsyms *kallsyms; struct mod_kallsyms core_kallsyms; struct module_sect_attrs *sect_attrs; struct module_notes_attrs *notes_attrs; char *args; void *percpu; unsigned int percpu_size; unsigned int num_tracepoints; const struct tracepoint **tracepoints_ptrs; unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_enum_map **trace_enums; unsigned int num_trace_enums; bool klp; bool klp_alive; struct klp_modinfo *klp_info; struct list_head source_list; struct list_head target_list; void (*exit)(); atomic_t refcnt; ctor_fn_t (**ctors)(); unsigned int num_ctors; } ; 799 struct percpu_ref ; 55 typedef void percpu_ref_func_t(struct percpu_ref *); 68 struct percpu_ref { atomic_long_t 
count; unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic; struct callback_head rcu; } ; 93 struct shrink_control { gfp_t gfp_mask; unsigned long nr_to_scan; int nid; struct mem_cgroup *memcg; } ; 27 struct shrinker { unsigned long int (*count_objects)(struct shrinker *, struct shrink_control *); unsigned long int (*scan_objects)(struct shrinker *, struct shrink_control *); int seeks; long batch; unsigned long flags; struct list_head list; atomic_long_t *nr_deferred; } ; 41 struct rlimit { __kernel_ulong_t rlim_cur; __kernel_ulong_t rlim_max; } ; 182 struct file_ra_state ; 183 struct user_struct ; 184 struct writeback_control ; 185 struct bdi_writeback ; 273 struct vm_fault { unsigned int flags; gfp_t gfp_mask; unsigned long pgoff; void *virtual_address; struct page *cow_page; struct page *page; void *entry; } ; 308 struct fault_env { struct vm_area_struct *vma; unsigned long address; unsigned int flags; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; pgtable_t prealloc_pte; } ; 335 struct vm_operations_struct { void (*open)(struct vm_area_struct *); void (*close)(struct vm_area_struct *); int (*mremap)(struct vm_area_struct *); int (*fault)(struct vm_area_struct *, struct vm_fault *); int (*pmd_fault)(struct vm_area_struct *, unsigned long, pmd_t *, unsigned int); void (*map_pages)(struct fault_env *, unsigned long, unsigned long); int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *); int (*access)(struct vm_area_struct *, unsigned long, void *, int, int); const char * (*name)(struct vm_area_struct *); int (*set_policy)(struct vm_area_struct *, struct mempolicy *); struct mempolicy * (*get_policy)(struct vm_area_struct *, unsigned long); struct page * (*find_special_page)(struct vm_area_struct *, unsigned long); } ; 1348 struct kvec ; 2451 struct scatterlist { unsigned long sg_magic; unsigned long page_link; unsigned int offset; 
unsigned int length; dma_addr_t dma_address; unsigned int dma_length; } ; 21 struct sg_table { struct scatterlist *sgl; unsigned int nents; unsigned int orig_nents; } ; 406 struct iovec { void *iov_base; __kernel_size_t iov_len; } ; 21 struct kvec { void *iov_base; size_t iov_len; } ; 27 union __anonunion____missing_field_name_262 { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; } ; 27 struct iov_iter { int type; size_t iov_offset; size_t count; union __anonunion____missing_field_name_262 __annonCompField52; unsigned long nr_segs; } ; 11 typedef unsigned short __kernel_sa_family_t; 12 struct __kernel_sockaddr_storage { __kernel_sa_family_t ss_family; char __data[126U]; } ; 18 struct pid ; 23 typedef __kernel_sa_family_t sa_family_t; 24 struct sockaddr { sa_family_t sa_family; char sa_data[14U]; } ; 38 struct kiocb ; 38 struct msghdr { void *msg_name; int msg_namelen; struct iov_iter msg_iter; void *msg_control; __kernel_size_t msg_controllen; unsigned int msg_flags; struct kiocb *msg_iocb; } ; 104 union __anonunion_in6_u_263 { __u8 u6_addr8[16U]; __be16 u6_addr16[8U]; __be32 u6_addr32[4U]; } ; 104 struct in6_addr { union __anonunion_in6_u_263 in6_u; } ; 46 struct klist_node ; 37 struct klist_node { void *n_klist; struct list_head n_node; struct kref n_ref; } ; 580 struct hlist_bl_node ; 580 struct hlist_bl_head { struct hlist_bl_node *first; } ; 36 struct hlist_bl_node { struct hlist_bl_node *next; struct hlist_bl_node **pprev; } ; 114 struct __anonstruct____missing_field_name_307 { spinlock_t lock; int count; } ; 114 union __anonunion____missing_field_name_306 { struct __anonstruct____missing_field_name_307 __annonCompField53; } ; 114 struct lockref { union __anonunion____missing_field_name_306 __annonCompField54; } ; 77 struct path ; 78 struct vfsmount ; 79 struct __anonstruct____missing_field_name_309 { u32 hash; u32 len; } ; 79 union __anonunion____missing_field_name_308 { struct __anonstruct____missing_field_name_309 
__annonCompField55; u64 hash_len; } ; 79 struct qstr { union __anonunion____missing_field_name_308 __annonCompField56; const unsigned char *name; } ; 65 struct dentry_operations ; 65 union __anonunion____missing_field_name_310 { struct list_head d_lru; wait_queue_head_t *d_wait; } ; 65 union __anonunion_d_u_311 { struct hlist_node d_alias; struct hlist_bl_node d_in_lookup_hash; struct callback_head d_rcu; } ; 65 struct dentry { unsigned int d_flags; seqcount_t d_seq; struct hlist_bl_node d_hash; struct dentry *d_parent; struct qstr d_name; struct inode *d_inode; unsigned char d_iname[32U]; struct lockref d_lockref; const struct dentry_operations *d_op; struct super_block *d_sb; unsigned long d_time; void *d_fsdata; union __anonunion____missing_field_name_310 __annonCompField57; struct list_head d_child; struct list_head d_subdirs; union __anonunion_d_u_311 d_u; } ; 121 struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char * (*d_dname)(struct dentry *, char *, int); struct vfsmount * (*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool ); struct dentry * (*d_real)(struct dentry *, const struct inode *, unsigned int); } ; 591 struct path { struct vfsmount *mnt; struct dentry *dentry; } ; 27 struct list_lru_one { struct list_head list; long nr_items; } ; 32 struct list_lru_memcg { struct list_lru_one *lru[0U]; } ; 37 struct list_lru_node { spinlock_t lock; struct list_lru_one lru; struct list_lru_memcg *memcg_lrus; } ; 47 struct list_lru { struct list_lru_node *node; struct list_head list; } ; 63 struct 
__anonstruct____missing_field_name_313 { struct radix_tree_node *parent; void *private_data; } ; 63 union __anonunion____missing_field_name_312 { struct __anonstruct____missing_field_name_313 __annonCompField58; struct callback_head callback_head; } ; 63 struct radix_tree_node { unsigned char shift; unsigned char offset; unsigned int count; union __anonunion____missing_field_name_312 __annonCompField59; struct list_head private_list; void *slots[64U]; unsigned long tags[3U][1U]; } ; 106 struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node *rnode; } ; 523 enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; 530 struct pid_namespace ; 530 struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; } ; 56 struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[3U]; struct callback_head rcu; struct upid numbers[1U]; } ; 68 struct pid_link { struct hlist_node node; struct pid *pid; } ; 22 struct kernel_cap_struct { __u32 cap[2U]; } ; 25 typedef struct kernel_cap_struct kernel_cap_t; 248 struct semaphore { raw_spinlock_t lock; unsigned int count; struct list_head wait_list; } ; 45 struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2U]; __u32 fe_flags; __u32 fe_reserved[3U]; } ; 38 enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; 44 enum rcu_sync_type { RCU_SYNC = 0, RCU_SCHED_SYNC = 1, RCU_BH_SYNC = 2 } ; 50 struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; int cb_state; struct callback_head cb_head; enum rcu_sync_type gp_type; } ; 65 struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int *fast_read_ctr; struct rw_semaphore rw_sem; atomic_t slow_read_ctr; wait_queue_head_t write_waitq; } ; 54 struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; } ; 34 struct bvec_iter { sector_t bi_sector; unsigned int bi_size; unsigned int bi_idx; unsigned int 
bi_bvec_done; } ; 84 struct bio_set ; 85 struct bio ; 86 struct bio_integrity_payload ; 87 struct block_device ; 88 struct io_context ; 89 struct cgroup_subsys_state ; 18 typedef void bio_end_io_t(struct bio *); 20 union __anonunion____missing_field_name_321 { struct bio_integrity_payload *bi_integrity; } ; 20 struct bio { struct bio *bi_next; struct block_device *bi_bdev; int bi_error; unsigned int bi_opf; unsigned short bi_flags; unsigned short bi_ioprio; struct bvec_iter bi_iter; unsigned int bi_phys_segments; unsigned int bi_seg_front_size; unsigned int bi_seg_back_size; atomic_t __bi_remaining; bio_end_io_t *bi_end_io; void *bi_private; struct io_context *bi_ioc; struct cgroup_subsys_state *bi_css; union __anonunion____missing_field_name_321 __annonCompField60; unsigned short bi_vcnt; unsigned short bi_max_vecs; atomic_t __bi_cnt; struct bio_vec *bi_io_vec; struct bio_set *bi_pool; struct bio_vec bi_inline_vecs[0U]; } ; 243 typedef unsigned int blk_qc_t; 266 struct delayed_call { void (*fn)(void *); void *arg; } ; 261 struct backing_dev_info ; 262 struct export_operations ; 263 struct hd_geometry ; 264 struct pipe_inode_info ; 265 struct poll_table_struct ; 266 struct kstatfs ; 267 struct swap_info_struct ; 268 struct fscrypt_info ; 269 struct fscrypt_operations ; 76 struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; } ; 261 struct percpu_counter { raw_spinlock_t lock; s64 count; struct list_head list; s32 *counters; } ; 213 struct dquot ; 214 struct kqid ; 19 typedef __kernel_uid32_t projid_t; 23 struct __anonstruct_kprojid_t_322 { projid_t val; } ; 23 typedef struct __anonstruct_kprojid_t_322 kprojid_t; 181 enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; 66 typedef long long qsize_t; 67 union __anonunion____missing_field_name_323 { kuid_t uid; kgid_t gid; kprojid_t projid; } ; 67 struct kqid { 
union __anonunion____missing_field_name_323 __annonCompField61; enum quota_type type; } ; 194 struct mem_dqblk { qsize_t dqb_bhardlimit; qsize_t dqb_bsoftlimit; qsize_t dqb_curspace; qsize_t dqb_rsvspace; qsize_t dqb_ihardlimit; qsize_t dqb_isoftlimit; qsize_t dqb_curinodes; time64_t dqb_btime; time64_t dqb_itime; } ; 216 struct quota_format_type ; 217 struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_max_spc_limit; qsize_t dqi_max_ino_limit; void *dqi_priv; } ; 282 struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; struct kqid dq_id; loff_t dq_off; unsigned long dq_flags; struct mem_dqblk dq_dqb; } ; 309 struct quota_format_ops { int (*check_quota_file)(struct super_block *, int); int (*read_file_info)(struct super_block *, int); int (*write_file_info)(struct super_block *, int); int (*free_file_info)(struct super_block *, int); int (*read_dqblk)(struct dquot *); int (*commit_dqblk)(struct dquot *); int (*release_dqblk)(struct dquot *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 321 struct dquot_operations { int (*write_dquot)(struct dquot *); struct dquot * (*alloc_dquot)(struct super_block *, int); void (*destroy_dquot)(struct dquot *); int (*acquire_dquot)(struct dquot *); int (*release_dquot)(struct dquot *); int (*mark_dirty)(struct dquot *); int (*write_info)(struct super_block *, int); qsize_t * (*get_reserved_space)(struct inode *); int (*get_projid)(struct inode *, kprojid_t *); int (*get_next_id)(struct super_block *, struct kqid *); } ; 338 struct qc_dqblk { int d_fieldmask; u64 d_spc_hardlimit; u64 d_spc_softlimit; u64 d_ino_hardlimit; u64 d_ino_softlimit; u64 d_space; u64 d_ino_count; s64 d_ino_timer; s64 d_spc_timer; int 
d_ino_warns; int d_spc_warns; u64 d_rt_spc_hardlimit; u64 d_rt_spc_softlimit; u64 d_rt_space; s64 d_rt_spc_timer; int d_rt_spc_warns; } ; 361 struct qc_type_state { unsigned int flags; unsigned int spc_timelimit; unsigned int ino_timelimit; unsigned int rt_spc_timelimit; unsigned int spc_warnlimit; unsigned int ino_warnlimit; unsigned int rt_spc_warnlimit; unsigned long long ino; blkcnt_t blocks; blkcnt_t nextents; } ; 407 struct qc_state { unsigned int s_incoredqs; struct qc_type_state s_state[3U]; } ; 418 struct qc_info { int i_fieldmask; unsigned int i_flags; unsigned int i_spc_timelimit; unsigned int i_ino_timelimit; unsigned int i_rt_spc_timelimit; unsigned int i_spc_warnlimit; unsigned int i_ino_warnlimit; unsigned int i_rt_spc_warnlimit; } ; 431 struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid , struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); } ; 447 struct quota_format_type { int qf_fmt_id; const struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; } ; 511 struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct inode *files[3U]; struct mem_dqinfo info[3U]; const struct quota_format_ops *ops[3U]; } ; 541 struct kiocb { struct file *ki_filp; loff_t ki_pos; void (*ki_complete)(struct kiocb *, long, long); void *private; int ki_flags; } ; 367 struct address_space_operations { int 
(*writepage)(struct page *, struct writeback_control *); int (*readpage)(struct file *, struct page *); int (*writepages)(struct address_space *, struct writeback_control *); int (*set_page_dirty)(struct page *); int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); int (*write_begin)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page **, void **); int (*write_end)(struct file *, struct address_space *, loff_t , unsigned int, unsigned int, struct page *, void *); sector_t (*bmap)(struct address_space *, sector_t ); void (*invalidatepage)(struct page *, unsigned int, unsigned int); int (*releasepage)(struct page *, gfp_t ); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode ); bool (*isolate_page)(struct page *, isolate_mode_t ); void (*putback_page)(struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); void (*is_dirty_writeback)(struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); void (*swap_deactivate)(struct file *); } ; 426 struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; atomic_t i_mmap_writable; struct rb_root i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; spinlock_t private_lock; struct list_head private_list; void *private_data; } ; 447 struct request_queue ; 448 struct hd_struct ; 448 struct gendisk ; 448 struct block_device { dev_t bd_dev; int bd_openers; struct inode *bd_inode; struct super_block *bd_super; struct mutex bd_mutex; void *bd_claiming; void *bd_holder; int 
bd_holders; bool bd_write_holder; struct list_head bd_holder_disks; struct block_device *bd_contains; unsigned int bd_block_size; struct hd_struct *bd_part; unsigned int bd_part_count; int bd_invalidated; struct gendisk *bd_disk; struct request_queue *bd_queue; struct list_head bd_list; unsigned long bd_private; int bd_fsfreeze_count; struct mutex bd_fsfreeze_mutex; } ; 563 struct posix_acl ; 589 struct inode_operations ; 589 union __anonunion____missing_field_name_328 { const unsigned int i_nlink; unsigned int __i_nlink; } ; 589 union __anonunion____missing_field_name_329 { struct hlist_head i_dentry; struct callback_head i_rcu; } ; 589 struct file_lock_context ; 589 struct cdev ; 589 union __anonunion____missing_field_name_330 { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned int i_dir_seq; } ; 589 struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; struct posix_acl *i_acl; struct posix_acl *i_default_acl; const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; void *i_security; unsigned long i_ino; union __anonunion____missing_field_name_328 __annonCompField62; dev_t i_rdev; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; unsigned short i_bytes; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; struct bdi_writeback *i_wb; int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; struct list_head i_lru; struct list_head i_sb_list; struct list_head i_wb_list; union __anonunion____missing_field_name_329 __annonCompField63; u64 i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; atomic_t i_readcount; const struct file_operations *i_fop; struct file_lock_context *i_flctx; 
struct address_space i_data; struct list_head i_devices; union __anonunion____missing_field_name_330 __annonCompField64; __u32 i_generation; __u32 i_fsnotify_mask; struct hlist_head i_fsnotify_marks; struct fscrypt_info *i_crypt_info; void *i_private; } ; 843 struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; kuid_t uid; kuid_t euid; int signum; } ; 851 struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; unsigned int mmap_miss; loff_t prev_pos; } ; 874 union __anonunion_f_u_331 { struct llist_node fu_llist; struct callback_head fu_rcuhead; } ; 874 struct file { union __anonunion_f_u_331 f_u; struct path f_path; struct inode *f_inode; const struct file_operations *f_op; spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; void *f_security; void *private_data; struct list_head f_ep_links; struct list_head f_tfile_llink; struct address_space *f_mapping; } ; 959 typedef void *fl_owner_t; 960 struct file_lock ; 961 struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); } ; 967 struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long int (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t ); void (*lm_put_owner)(fl_owner_t ); void (*lm_notify)(struct file_lock *); int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); } ; 988 struct net ; 994 struct nlm_lockowner ; 995 struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; } ; 14 struct nfs4_lock_state ; 15 struct nfs4_lock_info { struct nfs4_lock_state 
*owner; } ; 19 struct fasync_struct ; 19 struct __anonstruct_afs_333 { struct list_head link; int state; } ; 19 union __anonunion_fl_u_332 { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct __anonstruct_afs_333 afs; } ; 19 struct file_lock { struct file_lock *fl_next; struct list_head fl_list; struct hlist_node fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct *fl_fasync; unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; const struct lock_manager_operations *fl_lmops; union __anonunion_fl_u_332 fl_u; } ; 1047 struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; } ; 1102 struct files_struct ; 1255 struct fasync_struct { spinlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; struct callback_head fa_rcu; } ; 1290 struct sb_writers { int frozen; wait_queue_head_t wait_unfrozen; struct percpu_rw_semaphore rw_sem[3U]; } ; 1320 struct super_operations ; 1320 struct xattr_handler ; 1320 struct mtd_info ; 1320 struct super_block { struct list_head s_list; dev_t s_dev; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; void *s_security; const struct xattr_handler **s_xattr; const struct fscrypt_operations *s_cop; struct hlist_bl_head s_anon; struct list_head s_mounts; struct block_device *s_bdev; struct 
backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; struct quota_info s_dquot; struct sb_writers s_writers; char s_id[32U]; u8 s_uuid[16U]; void *s_fs_info; unsigned int s_max_links; fmode_t s_mode; u32 s_time_gran; struct mutex s_vfs_rename_mutex; char *s_subtype; char *s_options; const struct dentry_operations *s_d_op; int cleancache_poolid; struct shrinker s_shrink; atomic_long_t s_remove_count; int s_readonly_remount; struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; struct user_namespace *s_user_ns; struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct callback_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; int s_stack_depth; spinlock_t s_inode_list_lock; struct list_head s_inodes; spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; } ; 1603 struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; } ; 1616 struct dir_context ; 1641 struct dir_context { int (*actor)(struct dir_context *, const char *, int, loff_t , u64 , unsigned int); loff_t pos; } ; 1647 struct block_device_operations ; 1648 struct file_operations { struct module *owner; loff_t (*llseek)(struct file *, loff_t , int); ssize_t (*read)(struct file *, char *, size_t , loff_t *); ssize_t (*write)(struct file *, const char *, size_t , loff_t *); ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); int (*iterate)(struct file *, struct dir_context *); int (*iterate_shared)(struct file *, struct dir_context *); unsigned int (*poll)(struct file *, struct poll_table_struct *); long int (*unlocked_ioctl)(struct file *, unsigned int, unsigned long); long int (*compat_ioctl)(struct file *, unsigned int, unsigned long); int (*mmap)(struct file *, struct vm_area_struct *); int (*open)(struct inode *, struct file *); int (*flush)(struct file 
*, fl_owner_t ); int (*release)(struct inode *, struct file *); int (*fsync)(struct file *, loff_t , loff_t , int); int (*aio_fsync)(struct kiocb *, int); int (*fasync)(int, struct file *, int); int (*lock)(struct file *, int, struct file_lock *); ssize_t (*sendpage)(struct file *, struct page *, int, size_t , loff_t *, int); unsigned long int (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock)(struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t , unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long int (*fallocate)(struct file *, int, loff_t , loff_t ); void (*show_fdinfo)(struct seq_file *, struct file *); ssize_t (*copy_file_range)(struct file *, loff_t , struct file *, loff_t , size_t , unsigned int); int (*clone_file_range)(struct file *, loff_t , struct file *, loff_t , u64 ); ssize_t (*dedupe_file_range)(struct file *, u64 , u64 , struct file *, u64 ); } ; 1717 struct inode_operations { struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); int (*permission)(struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink)(struct dentry *, char *, int); int (*create)(struct inode *, struct dentry *, umode_t , bool ); int (*link)(struct dentry *, struct inode *, struct dentry *); int (*unlink)(struct inode *, struct dentry *); int (*symlink)(struct inode *, struct dentry *, const char *); int (*mkdir)(struct inode *, struct dentry *, umode_t ); int (*rmdir)(struct inode *, struct dentry *); int (*mknod)(struct inode *, struct dentry *, umode_t , dev_t ); int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); int (*rename2)(struct 
inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr)(struct dentry *, struct iattr *); int (*getattr)(struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr)(struct dentry *, struct inode *, const char *, const void *, size_t , int); ssize_t (*getxattr)(struct dentry *, struct inode *, const char *, void *, size_t ); ssize_t (*listxattr)(struct dentry *, char *, size_t ); int (*removexattr)(struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 , u64 ); int (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t , int *); int (*tmpfile)(struct inode *, struct dentry *, umode_t ); int (*set_acl)(struct inode *, struct posix_acl *, int); } ; 1774 struct super_operations { struct inode * (*alloc_inode)(struct super_block *); void (*destroy_inode)(struct inode *); void (*dirty_inode)(struct inode *, int); int (*write_inode)(struct inode *, struct writeback_control *); int (*drop_inode)(struct inode *); void (*evict_inode)(struct inode *); void (*put_super)(struct super_block *); int (*sync_fs)(struct super_block *, int); int (*freeze_super)(struct super_block *); int (*freeze_fs)(struct super_block *); int (*thaw_super)(struct super_block *); int (*unfreeze_fs)(struct super_block *); int (*statfs)(struct dentry *, struct kstatfs *); int (*remount_fs)(struct super_block *, int *, char *); void (*umount_begin)(struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t , loff_t ); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t , loff_t ); struct dquot ** (*get_dquots)(struct inode *); int (*bdev_try_to_free_page)(struct 
super_block *, struct page *, gfp_t ); long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); long int (*free_cached_objects)(struct super_block *, struct shrink_control *); } ; 2018 struct file_system_type { const char *name; int fs_flags; struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); void (*kill_sb)(struct super_block *); struct module *owner; struct file_system_type *next; struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; struct lock_class_key s_writers_key[3U]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; } ; 3193 struct assoc_array_ptr ; 3193 struct assoc_array { struct assoc_array_ptr *root; unsigned long nr_leaves_on_tree; } ; 31 typedef int32_t key_serial_t; 34 typedef uint32_t key_perm_t; 35 struct key ; 36 struct signal_struct ; 37 struct key_type ; 41 struct keyring_index_key { struct key_type *type; const char *description; size_t desc_len; } ; 91 union key_payload { void *rcu_data0; void *data[4U]; } ; 128 union __anonunion____missing_field_name_334 { struct list_head graveyard_link; struct rb_node serial_node; } ; 128 struct key_user ; 128 union __anonunion____missing_field_name_335 { time_t expiry; time_t revoked_at; } ; 128 struct __anonstruct____missing_field_name_337 { struct key_type *type; char *description; } ; 128 union __anonunion____missing_field_name_336 { struct keyring_index_key index_key; struct __anonstruct____missing_field_name_337 __annonCompField67; } ; 128 struct __anonstruct____missing_field_name_339 { struct list_head name_link; struct assoc_array keys; } ; 128 union __anonunion____missing_field_name_338 { union key_payload payload; struct __anonstruct____missing_field_name_339 __annonCompField69; int reject_error; } ; 128 struct key { atomic_t usage; key_serial_t serial; union __anonunion____missing_field_name_334 
__annonCompField65; struct rw_semaphore sem; struct key_user *user; void *security; union __anonunion____missing_field_name_335 __annonCompField66; time_t last_used_at; kuid_t uid; kgid_t gid; key_perm_t perm; unsigned short quotalen; unsigned short datalen; unsigned long flags; union __anonunion____missing_field_name_336 __annonCompField68; union __anonunion____missing_field_name_338 __annonCompField70; int (*restrict_link)(struct key *, const struct key_type *, const union key_payload *); } ; 377 struct audit_context ; 27 struct group_info { atomic_t usage; int ngroups; int nblocks; kgid_t small_block[32U]; kgid_t *blocks[0U]; } ; 90 struct cred { atomic_t usage; atomic_t subscribers; void *put_addr; unsigned int magic; kuid_t uid; kgid_t gid; kuid_t suid; kgid_t sgid; kuid_t euid; kgid_t egid; kuid_t fsuid; kgid_t fsgid; unsigned int securebits; kernel_cap_t cap_inheritable; kernel_cap_t cap_permitted; kernel_cap_t cap_effective; kernel_cap_t cap_bset; kernel_cap_t cap_ambient; unsigned char jit_keyring; struct key *session_keyring; struct key *process_keyring; struct key *thread_keyring; struct key *request_key_auth; void *security; struct user_struct *user; struct user_namespace *user_ns; struct group_info *group_info; struct callback_head rcu; } ; 377 struct seq_file { char *buf; size_t size; size_t from; size_t count; size_t pad_until; loff_t index; loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; const struct file *file; void *private; } ; 30 struct seq_operations { void * (*start)(struct seq_file *, loff_t *); void (*stop)(struct seq_file *, void *); void * (*next)(struct seq_file *, void *, loff_t *); int (*show)(struct seq_file *, void *); } ; 222 struct pinctrl ; 223 struct pinctrl_state ; 194 struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; } ; 84 struct plist_node { int 
prio; struct list_head prio_list; struct list_head node_list; } ; 4 typedef unsigned long cputime_t; 25 struct sem_undo_list ; 25 struct sysv_sem { struct sem_undo_list *undo_list; } ; 26 struct sysv_shm { struct list_head shm_clist; } ; 24 struct __anonstruct_sigset_t_340 { unsigned long sig[1U]; } ; 24 typedef struct __anonstruct_sigset_t_340 sigset_t; 25 struct siginfo ; 17 typedef void __signalfn_t(int); 18 typedef __signalfn_t *__sighandler_t; 20 typedef void __restorefn_t(); 21 typedef __restorefn_t *__sigrestore_t; 34 union sigval { int sival_int; void *sival_ptr; } ; 10 typedef union sigval sigval_t; 11 struct __anonstruct__kill_342 { __kernel_pid_t _pid; __kernel_uid32_t _uid; } ; 11 struct __anonstruct__timer_343 { __kernel_timer_t _tid; int _overrun; char _pad[0U]; sigval_t _sigval; int _sys_private; } ; 11 struct __anonstruct__rt_344 { __kernel_pid_t _pid; __kernel_uid32_t _uid; sigval_t _sigval; } ; 11 struct __anonstruct__sigchld_345 { __kernel_pid_t _pid; __kernel_uid32_t _uid; int _status; __kernel_clock_t _utime; __kernel_clock_t _stime; } ; 11 struct __anonstruct__addr_bnd_348 { void *_lower; void *_upper; } ; 11 union __anonunion____missing_field_name_347 { struct __anonstruct__addr_bnd_348 _addr_bnd; __u32 _pkey; } ; 11 struct __anonstruct__sigfault_346 { void *_addr; short _addr_lsb; union __anonunion____missing_field_name_347 __annonCompField71; } ; 11 struct __anonstruct__sigpoll_349 { long _band; int _fd; } ; 11 struct __anonstruct__sigsys_350 { void *_call_addr; int _syscall; unsigned int _arch; } ; 11 union __anonunion__sifields_341 { int _pad[28U]; struct __anonstruct__kill_342 _kill; struct __anonstruct__timer_343 _timer; struct __anonstruct__rt_344 _rt; struct __anonstruct__sigchld_345 _sigchld; struct __anonstruct__sigfault_346 _sigfault; struct __anonstruct__sigpoll_349 _sigpoll; struct __anonstruct__sigsys_350 _sigsys; } ; 11 struct siginfo { int si_signo; int si_errno; int si_code; union __anonunion__sifields_341 _sifields; } ; 118 
typedef struct siginfo siginfo_t; 22 struct sigpending { struct list_head list; sigset_t signal; } ; 257 struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; } ; 271 struct k_sigaction { struct sigaction sa; } ; 43 struct seccomp_filter ; 44 struct seccomp { int mode; struct seccomp_filter *filter; } ; 40 struct rt_mutex_waiter ; 100 struct timerqueue_node { struct rb_node node; ktime_t expires; } ; 12 struct timerqueue_head { struct rb_root head; struct timerqueue_node *next; } ; 50 struct hrtimer_clock_base ; 51 struct hrtimer_cpu_base ; 60 enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; 65 struct hrtimer { struct timerqueue_node node; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; u8 is_rel; int start_pid; void *start_site; char start_comm[16U]; } ; 125 struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; int index; clockid_t clockid; struct timerqueue_head active; ktime_t (*get_time)(); ktime_t offset; } ; 158 struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; bool migration_enabled; bool nohz_active; unsigned char in_hrtirq; unsigned char hres_active; unsigned char hang_detected; ktime_t expires_next; struct hrtimer *next_timer; unsigned int nr_events; unsigned int nr_retries; unsigned int nr_hangs; unsigned int max_hang_time; struct hrtimer_clock_base clock_base[4U]; } ; 12 enum kcov_mode { KCOV_MODE_DISABLED = 0, KCOV_MODE_TRACE = 1 } ; 17 struct task_io_accounting { u64 rchar; u64 wchar; u64 syscr; u64 syscw; u64 read_bytes; u64 write_bytes; u64 cancelled_write_bytes; } ; 45 struct latency_record { unsigned long backtrace[12U]; unsigned int count; unsigned long time; unsigned long max; } ; 41 struct cgroup ; 42 struct cgroup_root ; 43 struct cgroup_subsys ; 44 struct cgroup_taskset ; 
88 struct cgroup_file { struct kernfs_node *kn; } ; 90 struct cgroup_subsys_state { struct cgroup *cgroup; struct cgroup_subsys *ss; struct percpu_ref refcnt; struct cgroup_subsys_state *parent; struct list_head sibling; struct list_head children; int id; unsigned int flags; u64 serial_nr; atomic_t online_cnt; struct callback_head callback_head; struct work_struct destroy_work; } ; 141 struct css_set { atomic_t refcount; struct hlist_node hlist; struct list_head tasks; struct list_head mg_tasks; struct list_head cgrp_links; struct cgroup *dfl_cgrp; struct cgroup_subsys_state *subsys[13U]; struct list_head mg_preload_node; struct list_head mg_node; struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; struct list_head e_cset_node[13U]; struct list_head task_iters; bool dead; struct callback_head callback_head; } ; 221 struct cgroup { struct cgroup_subsys_state self; unsigned long flags; int id; int level; int populated_cnt; struct kernfs_node *kn; struct cgroup_file procs_file; struct cgroup_file events_file; u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; struct cgroup_subsys_state *subsys[13U]; struct cgroup_root *root; struct list_head cset_links; struct list_head e_csets[13U]; struct list_head pidlists; struct mutex pidlist_mutex; wait_queue_head_t offline_waitq; struct work_struct release_agent_work; int ancestor_ids[]; } ; 306 struct cgroup_root { struct kernfs_root *kf_root; unsigned int subsys_mask; int hierarchy_id; struct cgroup cgrp; int cgrp_ancestor_id_storage; atomic_t nr_cgrps; struct list_head root_list; unsigned int flags; struct idr cgroup_idr; char release_agent_path[4096U]; char name[64U]; } ; 345 struct cftype { char name[64U]; unsigned long private; size_t max_write_len; unsigned int flags; unsigned int file_offset; struct cgroup_subsys *ss; struct list_head node; struct kernfs_ops *kf_ops; u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); s64 
(*read_s64)(struct cgroup_subsys_state *, struct cftype *); int (*seq_show)(struct seq_file *, void *); void * (*seq_start)(struct seq_file *, loff_t *); void * (*seq_next)(struct seq_file *, void *, loff_t *); void (*seq_stop)(struct seq_file *, void *); int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64 ); int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64 ); ssize_t (*write)(struct kernfs_open_file *, char *, size_t , loff_t ); struct lock_class_key lockdep_key; } ; 430 struct cgroup_subsys { struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); int (*css_online)(struct cgroup_subsys_state *); void (*css_offline)(struct cgroup_subsys_state *); void (*css_released)(struct cgroup_subsys_state *); void (*css_free)(struct cgroup_subsys_state *); void (*css_reset)(struct cgroup_subsys_state *); int (*can_attach)(struct cgroup_taskset *); void (*cancel_attach)(struct cgroup_taskset *); void (*attach)(struct cgroup_taskset *); void (*post_attach)(); int (*can_fork)(struct task_struct *); void (*cancel_fork)(struct task_struct *); void (*fork)(struct task_struct *); void (*exit)(struct task_struct *); void (*free)(struct task_struct *); void (*bind)(struct cgroup_subsys_state *); bool early_init; bool implicit_on_dfl; bool broken_hierarchy; bool warned_broken_hierarchy; int id; const char *name; const char *legacy_name; struct cgroup_root *root; struct idr css_idr; struct list_head cfts; struct cftype *dfl_cftypes; struct cftype *legacy_cftypes; unsigned int depends_on; } ; 546 struct __anonstruct____missing_field_name_354 { u8 is_data; u8 padding; u16 prioidx; u32 classid; } ; 546 union __anonunion____missing_field_name_353 { struct __anonstruct____missing_field_name_354 __annonCompField72; u64 val; } ; 546 struct sock_cgroup_data { union __anonunion____missing_field_name_353 __annonCompField73; } ; 128 struct futex_pi_state ; 129 struct robust_list_head ; 130 struct bio_list ; 131 struct fs_struct ; 132 struct 
perf_event_context ; 133 struct blk_plug ; 134 struct nameidata ; 188 struct cfs_rq ; 189 struct task_group ; 493 struct sighand_struct { atomic_t count; struct k_sigaction action[64U]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; } ; 536 struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime; cputime_t ac_stime; unsigned long ac_minflt; unsigned long ac_majflt; } ; 544 struct cpu_itimer { cputime_t expires; cputime_t incr; u32 error; u32 incr_error; } ; 551 struct prev_cputime { cputime_t utime; cputime_t stime; raw_spinlock_t lock; } ; 576 struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; } ; 592 struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; } ; 614 struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; bool running; bool checking_timer; } ; 659 struct autogroup ; 660 struct tty_struct ; 660 struct taskstats ; 660 struct tty_audit_buf ; 660 struct signal_struct { atomic_t sigcnt; atomic_t live; int nr_threads; atomic_t oom_victims; struct list_head thread_head; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; unsigned char is_child_subreaper; unsigned char has_child_subreaper; int posix_timer_id; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; struct cpu_itimer it[2U]; struct thread_group_cputimer cputimer; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; struct pid *tty_old_pgrp; int leader; struct tty_struct *tty; struct autogroup *autogroup; seqlock_t stats_lock; cputime_t utime; cputime_t stime; cputime_t cutime; cputime_t cstime; cputime_t gtime; cputime_t cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; unsigned long cnvcsw; 
unsigned long cnivcsw; unsigned long min_flt; unsigned long maj_flt; unsigned long cmin_flt; unsigned long cmaj_flt; unsigned long inblock; unsigned long oublock; unsigned long cinblock; unsigned long coublock; unsigned long maxrss; unsigned long cmaxrss; struct task_io_accounting ioac; unsigned long long sum_sched_runtime; struct rlimit rlim[16U]; struct pacct_struct pacct; struct taskstats *stats; unsigned int audit_tty; struct tty_audit_buf *tty_audit_buf; bool oom_flag_origin; short oom_score_adj; short oom_score_adj_min; struct mutex cred_guard_mutex; } ; 835 struct user_struct { atomic_t __count; atomic_t processes; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t fanotify_listeners; atomic_long_t epoll_watches; unsigned long mq_bytes; unsigned long locked_shm; unsigned long unix_inflight; atomic_long_t pipe_bufs; struct key *uid_keyring; struct key *session_keyring; struct hlist_node uidhash_node; kuid_t uid; atomic_long_t locked_vm; } ; 880 struct reclaim_state ; 881 struct sched_info { unsigned long pcount; unsigned long long run_delay; unsigned long long last_arrival; unsigned long long last_queued; } ; 896 struct task_delay_info { spinlock_t lock; unsigned int flags; u64 blkio_start; u64 blkio_delay; u64 swapin_delay; u32 blkio_count; u32 swapin_count; u64 freepages_start; u64 freepages_delay; u32 freepages_count; } ; 953 struct wake_q_node { struct wake_q_node *next; } ; 1219 struct uts_namespace ; 1220 struct load_weight { unsigned long weight; u32 inv_weight; } ; 1228 struct sched_avg { u64 last_update_time; u64 load_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; unsigned long util_avg; } ; 1286 struct sched_statistics { u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 iowait_count; u64 iowait_sum; u64 sleep_start; u64 sleep_max; s64 sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 
nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; } ; 1321 struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 nr_migrations; struct sched_statistics statistics; int depth; struct sched_entity *parent; struct cfs_rq *cfs_rq; struct cfs_rq *my_q; struct sched_avg avg; } ; 1358 struct rt_rq ; 1358 struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; unsigned short on_rq; unsigned short on_list; struct sched_rt_entity *back; struct sched_rt_entity *parent; struct rt_rq *rt_rq; struct rt_rq *my_q; } ; 1376 struct sched_dl_entity { struct rb_node rb_node; u64 dl_runtime; u64 dl_deadline; u64 dl_period; u64 dl_bw; s64 runtime; u64 deadline; unsigned int flags; int dl_throttled; int dl_boosted; int dl_yielded; struct hrtimer dl_timer; } ; 1440 struct tlbflush_unmap_batch { struct cpumask cpumask; bool flush_required; bool writable; } ; 1459 struct sched_class ; 1459 struct compat_robust_list_head ; 1459 struct numa_group ; 1459 struct kcov ; 1459 struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; struct llist_node wake_entry; int on_cpu; unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; int wake_cpu; int on_rq; int prio; int static_prio; int normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; struct task_group *sched_task_group; struct sched_dl_entity dl; struct hlist_head preempt_notifiers; unsigned int policy; int nr_cpus_allowed; 
cpumask_t cpus_allowed; unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; struct list_head rcu_tasks_holdout_list; int rcu_tasks_idle_cpu; struct sched_info sched_info; struct list_head tasks; struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; struct mm_struct *mm; struct mm_struct *active_mm; u32 vmacache_seqnum; struct vm_area_struct *vmacache[4U]; struct task_rss_stat rss_stat; int exit_state; int exit_code; int exit_signal; int pdeath_signal; unsigned long jobctl; unsigned int personality; unsigned char sched_reset_on_fork; unsigned char sched_contributes_to_load; unsigned char sched_migrated; unsigned char sched_remote_wakeup; unsigned char; unsigned char in_execve; unsigned char in_iowait; unsigned char restore_sigmask; unsigned char memcg_may_oom; unsigned char memcg_kmem_skip_account; unsigned char brk_randomized; unsigned long atomic_flags; struct restart_block restart_block; pid_t pid; pid_t tgid; struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[3U]; struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime; cputime_t stime; cputime_t utimescaled; cputime_t stimescaled; cputime_t gtime; struct prev_cputime prev_cputime; unsigned long nvcsw; unsigned long nivcsw; u64 start_time; u64 real_start_time; unsigned long min_flt; unsigned long maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3U]; const struct cred *real_cred; const struct cred *cred; char comm[16U]; struct nameidata *nameidata; struct sysv_sem sysvsem; struct sysv_shm sysvshm; unsigned long last_switch_count; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked; sigset_t real_blocked; 
sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; struct callback_head *task_works; struct audit_context *audit_context; kuid_t loginuid; unsigned int sessionid; struct seccomp seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; raw_spinlock_t pi_lock; struct wake_q_node wake_q; struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; unsigned int irq_events; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; u64 curr_chain_key; int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[48U]; gfp_t lockdep_reclaim_gfp; unsigned int in_ubsan; void *journal_info; struct bio_list *bio_list; struct blk_plug *plug; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; u64 acct_rss_mem1; u64 acct_vm_mem1; cputime_t acct_timexpd; nodemask_t mems_allowed; seqcount_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct compat_robust_list_head *compat_robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; struct perf_event_context *perf_event_ctxp[2U]; struct mutex perf_event_mutex; struct list_head perf_event_list; struct mempolicy *mempolicy; short il_next; short pref_node_fork; int numa_scan_seq; unsigned int numa_scan_period; unsigned int numa_scan_period_max; int numa_preferred_nid; unsigned 
long numa_migrate_retry; u64 node_stamp; u64 last_task_numa_placement; u64 last_sum_exec_runtime; struct callback_head numa_work; struct list_head numa_entry; struct numa_group *numa_group; unsigned long *numa_faults; unsigned long total_numa_faults; unsigned long numa_faults_locality[3U]; unsigned long numa_pages_migrated; struct tlbflush_unmap_batch tlb_ubc; struct callback_head rcu; struct pipe_inode_info *splice_pipe; struct page_frag task_frag; struct task_delay_info *delays; int make_it_fail; int nr_dirtied; int nr_dirtied_pause; unsigned long dirty_paused_when; int latency_record_count; struct latency_record latency_record[32U]; u64 timer_slack_ns; u64 default_timer_slack_ns; unsigned int kasan_depth; unsigned long trace; unsigned long trace_recursion; enum kcov_mode kcov_mode; unsigned int kcov_size; void *kcov_area; struct kcov *kcov; struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; int memcg_oom_order; unsigned int memcg_nr_pages_over_high; struct uprobe_task *utask; unsigned int sequential_io; unsigned int sequential_io_avg; unsigned long task_state_change; int pagefault_disabled; struct task_struct *oom_reaper_list; struct thread_struct thread; } ; 76 struct dma_map_ops ; 76 struct dev_archdata { struct dma_map_ops *dma_ops; void *iommu; } ; 24 struct device_private ; 25 struct device_driver ; 26 struct driver_private ; 27 struct class ; 28 struct subsys_private ; 29 struct bus_type ; 30 struct device_node ; 31 struct fwnode_handle ; 32 struct iommu_ops ; 33 struct iommu_group ; 61 struct device_attribute ; 61 struct bus_type { const char *name; const char *dev_name; struct device *dev_root; struct device_attribute *dev_attrs; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *, struct device_driver *); int (*uevent)(struct device *, struct kobj_uevent_env *); int (*probe)(struct device *); int (*remove)(struct device *); void 
(*shutdown)(struct device *); int (*online)(struct device *); int (*offline)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; } ; 142 struct device_type ; 201 enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; 207 struct of_device_id ; 207 struct acpi_device_id ; 207 struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; bool suppress_bind_attrs; enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe)(struct device *); int (*remove)(struct device *); void (*shutdown)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct attribute_group **groups; const struct dev_pm_ops *pm; struct driver_private *p; } ; 357 struct class_attribute ; 357 struct class { const char *name; struct module *owner; struct class_attribute *class_attrs; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *, struct kobj_uevent_env *); char * (*devnode)(struct device *, umode_t *); void (*class_release)(struct class *); void (*dev_release)(struct device *); int (*suspend)(struct device *, pm_message_t ); int (*resume)(struct device *); const struct kobj_ns_type_operations *ns_type; const void * (*namespace)(struct device *); const struct dev_pm_ops *pm; struct subsys_private *p; } ; 450 struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *, struct class_attribute *, char *); ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t ); } ; 518 struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *, struct kobj_uevent_env *); char * 
(*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); void (*release)(struct device *); const struct dev_pm_ops *pm; } ; 546 struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t ); } ; 699 struct device_dma_parameters { unsigned int max_segment_size; unsigned long segment_boundary_mask; } ; 708 struct irq_domain ; 708 struct dma_coherent_mem ; 708 struct cma ; 708 struct device { struct device *parent; struct device_private *p; struct kobject kobj; const char *init_name; const struct device_type *type; struct mutex mutex; struct bus_type *bus; struct device_driver *driver; void *platform_data; void *driver_data; struct dev_pm_info power; struct dev_pm_domain *pm_domain; struct irq_domain *msi_domain; struct dev_pin_info *pins; struct list_head msi_list; int numa_node; u64 *dma_mask; u64 coherent_dma_mask; unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; struct dma_coherent_mem *dma_mem; struct cma *cma_area; struct dev_archdata archdata; struct device_node *of_node; struct fwnode_handle *fwnode; dev_t devt; u32 id; spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; const struct attribute_group **groups; void (*release)(struct device *); struct iommu_group *iommu_group; bool offline_disabled; bool offline; } ; 862 struct wakeup_source { const char *name; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; bool active; bool autosleep_enabled; } ; 89 enum dma_data_direction { DMA_BIDIRECTIONAL = 0, 
DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; 158 struct dma_map_ops { void * (*alloc)(struct device *, size_t , dma_addr_t *, gfp_t , unsigned long); void (*free)(struct device *, size_t , void *, dma_addr_t , unsigned long); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t , size_t , unsigned long); int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t , size_t , unsigned long); dma_addr_t (*map_page)(struct device *, struct page *, unsigned long, size_t , enum dma_data_direction , unsigned long); void (*unmap_page)(struct device *, dma_addr_t , size_t , enum dma_data_direction , unsigned long); int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction , unsigned long); void (*sync_single_for_cpu)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct device *, dma_addr_t , size_t , enum dma_data_direction ); void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction ); int (*mapping_error)(struct device *, dma_addr_t ); int (*dma_supported)(struct device *, u64 ); int (*set_dma_mask)(struct device *, u64 ); int is_phys; } ; 716 struct irq_poll ; 5 typedef int irq_poll_fn(struct irq_poll *, int); 6 struct irq_poll { struct list_head list; unsigned long state; int weight; irq_poll_fn *poll; } ; 24 struct ethhdr { unsigned char h_dest[6U]; unsigned char h_source[6U]; __be16 h_proto; } ; 180 struct ipv6_stable_secret { bool initialized; struct in6_addr secret; } ; 64 struct ipv6_devconf { __s32 forwarding; __s32 hop_limit; __s32 mtu6; __s32 accept_ra; __s32 accept_redirects; __s32 autoconf; __s32 dad_transmits; __s32 rtr_solicits; __s32 rtr_solicit_interval; __s32 rtr_solicit_delay; __s32 force_mld_version; 
__s32 mldv1_unsolicited_report_interval; __s32 mldv2_unsolicited_report_interval; __s32 use_tempaddr; __s32 temp_valid_lft; __s32 temp_prefered_lft; __s32 regen_max_retry; __s32 max_desync_factor; __s32 max_addresses; __s32 accept_ra_defrtr; __s32 accept_ra_min_hop_limit; __s32 accept_ra_pinfo; __s32 ignore_routes_with_linkdown; __s32 accept_ra_rtr_pref; __s32 rtr_probe_interval; __s32 accept_ra_rt_info_max_plen; __s32 proxy_ndp; __s32 accept_source_route; __s32 accept_ra_from_local; __s32 optimistic_dad; __s32 use_optimistic; __s32 mc_forwarding; __s32 disable_ipv6; __s32 drop_unicast_in_l2_multicast; __s32 accept_dad; __s32 force_tllao; __s32 ndisc_notify; __s32 suppress_frag_ndisc; __s32 accept_ra_mtu; __s32 drop_unsolicited_na; struct ipv6_stable_secret stable_secret; __s32 use_oif_addrs_only; __s32 keep_addr_on_down; struct ctl_table_header *sysctl_header; } ; 129 enum ldv_25500 { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4 } ; 53 typedef enum ldv_25500 socket_state; 70 struct socket_wq { wait_queue_head_t wait; struct fasync_struct *fasync_list; unsigned long flags; struct callback_head rcu; } ; 99 struct proto_ops ; 99 struct socket { socket_state state; short type; unsigned long flags; struct socket_wq *wq; struct file *file; struct sock *sk; const struct proto_ops *ops; } ; 125 struct proto_ops { int family; struct module *owner; int (*release)(struct socket *); int (*bind)(struct socket *, struct sockaddr *, int); int (*connect)(struct socket *, struct sockaddr *, int, int); int (*socketpair)(struct socket *, struct socket *); int (*accept)(struct socket *, struct socket *, int); int (*getname)(struct socket *, struct sockaddr *, int *, int); unsigned int (*poll)(struct file *, struct socket *, struct poll_table_struct *); int (*ioctl)(struct socket *, unsigned int, unsigned long); int (*compat_ioctl)(struct socket *, unsigned int, unsigned long); int (*listen)(struct socket *, int); int (*shutdown)(struct 
socket *, int); int (*setsockopt)(struct socket *, int, int, char *, unsigned int); int (*getsockopt)(struct socket *, int, int, char *, int *); int (*compat_setsockopt)(struct socket *, int, int, char *, unsigned int); int (*compat_getsockopt)(struct socket *, int, int, char *, int *); int (*sendmsg)(struct socket *, struct msghdr *, size_t ); int (*recvmsg)(struct socket *, struct msghdr *, size_t , int); int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); ssize_t (*sendpage)(struct socket *, struct page *, int, size_t , int); ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t , unsigned int); int (*set_peek_off)(struct sock *, int); int (*peek_len)(struct socket *); } ; 63 struct exception_table_entry { int insn; int fixup; int handler; } ; 145 struct sk_buff ; 15 typedef u64 netdev_features_t; 199 struct pipe_buf_operations ; 199 struct pipe_buffer { struct page *page; unsigned int offset; unsigned int len; const struct pipe_buf_operations *ops; unsigned int flags; unsigned long private; } ; 27 struct pipe_inode_info { struct mutex mutex; wait_queue_head_t wait; unsigned int nrbufs; unsigned int curbuf; unsigned int buffers; unsigned int readers; unsigned int writers; unsigned int files; unsigned int waiting_writers; unsigned int r_counter; unsigned int w_counter; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; } ; 63 struct pipe_buf_operations { int can_merge; int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); void (*release)(struct pipe_inode_info *, struct pipe_buffer *); int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); void (*get)(struct pipe_inode_info *, struct pipe_buffer *); } ; 295 struct flowi_tunnel { __be64 tun_id; } ; 26 struct flowi_common { int flowic_oif; int flowic_iif; __u32 flowic_mark; __u8 flowic_tos; __u8 flowic_scope; __u8 flowic_proto; __u8 flowic_flags; __u32 
flowic_secid; struct flowi_tunnel flowic_tun_key; } ; 42 struct __anonstruct_ports_389 { __be16 dport; __be16 sport; } ; 42 struct __anonstruct_icmpt_390 { __u8 type; __u8 code; } ; 42 struct __anonstruct_dnports_391 { __le16 dport; __le16 sport; } ; 42 struct __anonstruct_mht_392 { __u8 type; } ; 42 union flowi_uli { struct __anonstruct_ports_389 ports; struct __anonstruct_icmpt_390 icmpt; struct __anonstruct_dnports_391 dnports; __be32 spi; __be32 gre_key; struct __anonstruct_mht_392 mht; } ; 66 struct flowi4 { struct flowi_common __fl_common; __be32 saddr; __be32 daddr; union flowi_uli uli; } ; 123 struct flowi6 { struct flowi_common __fl_common; struct in6_addr daddr; struct in6_addr saddr; __be32 flowlabel; union flowi_uli uli; } ; 141 struct flowidn { struct flowi_common __fl_common; __le16 daddr; __le16 saddr; union flowi_uli uli; } ; 161 union __anonunion_u_393 { struct flowi_common __fl_common; struct flowi4 ip4; struct flowi6 ip6; struct flowidn dn; } ; 161 struct flowi { union __anonunion_u_393 u; } ; 265 struct napi_struct ; 266 struct nf_conntrack { atomic_t use; } ; 254 union __anonunion____missing_field_name_394 { __be32 ipv4_daddr; struct in6_addr ipv6_daddr; char neigh_header[8U]; } ; 254 struct nf_bridge_info { atomic_t use; unsigned char orig_proto; unsigned char pkt_otherhost; unsigned char in_prerouting; unsigned char bridged_dnat; __u16 frag_max_size; struct net_device *physindev; struct net_device *physoutdev; union __anonunion____missing_field_name_394 __annonCompField82; } ; 278 struct sk_buff_head { struct sk_buff *next; struct sk_buff *prev; __u32 qlen; spinlock_t lock; } ; 500 typedef unsigned int sk_buff_data_t; 501 struct __anonstruct____missing_field_name_397 { u32 stamp_us; u32 stamp_jiffies; } ; 501 union __anonunion____missing_field_name_396 { u64 v64; struct __anonstruct____missing_field_name_397 __annonCompField83; } ; 501 struct skb_mstamp { union __anonunion____missing_field_name_396 __annonCompField84; } ; 564 union 
__anonunion____missing_field_name_400 { ktime_t tstamp; struct skb_mstamp skb_mstamp; } ; 564 struct __anonstruct____missing_field_name_399 { struct sk_buff *next; struct sk_buff *prev; union __anonunion____missing_field_name_400 __annonCompField85; } ; 564 union __anonunion____missing_field_name_398 { struct __anonstruct____missing_field_name_399 __annonCompField86; struct rb_node rbnode; } ; 564 struct sec_path ; 564 struct __anonstruct____missing_field_name_402 { __u16 csum_start; __u16 csum_offset; } ; 564 union __anonunion____missing_field_name_401 { __wsum csum; struct __anonstruct____missing_field_name_402 __annonCompField88; } ; 564 union __anonunion____missing_field_name_403 { unsigned int napi_id; unsigned int sender_cpu; } ; 564 union __anonunion____missing_field_name_404 { __u32 secmark; __u32 offload_fwd_mark; } ; 564 union __anonunion____missing_field_name_405 { __u32 mark; __u32 reserved_tailroom; } ; 564 union __anonunion____missing_field_name_406 { __be16 inner_protocol; __u8 inner_ipproto; } ; 564 struct sk_buff { union __anonunion____missing_field_name_398 __annonCompField87; struct sock *sk; struct net_device *dev; char cb[48U]; unsigned long _skb_refdst; void (*destructor)(struct sk_buff *); struct sec_path *sp; struct nf_conntrack *nfct; struct nf_bridge_info *nf_bridge; unsigned int len; unsigned int data_len; __u16 mac_len; __u16 hdr_len; __u16 queue_mapping; unsigned char cloned; unsigned char nohdr; unsigned char fclone; unsigned char peeked; unsigned char head_frag; unsigned char xmit_more; __u32 headers_start[0U]; __u8 __pkt_type_offset[0U]; unsigned char pkt_type; unsigned char pfmemalloc; unsigned char ignore_df; unsigned char nfctinfo; unsigned char nf_trace; unsigned char ip_summed; unsigned char ooo_okay; unsigned char l4_hash; unsigned char sw_hash; unsigned char wifi_acked_valid; unsigned char wifi_acked; unsigned char no_fcs; unsigned char encapsulation; unsigned char encap_hdr_csum; unsigned char csum_valid; unsigned char 
csum_complete_sw; unsigned char csum_level; unsigned char csum_bad; unsigned char ndisc_nodetype; unsigned char ipvs_property; unsigned char inner_protocol_type; unsigned char remcsum_offload; __u16 tc_index; __u16 tc_verd; union __anonunion____missing_field_name_401 __annonCompField89; __u32 priority; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; union __anonunion____missing_field_name_403 __annonCompField90; union __anonunion____missing_field_name_404 __annonCompField91; union __anonunion____missing_field_name_405 __annonCompField92; union __anonunion____missing_field_name_406 __annonCompField93; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; __u32 headers_end[0U]; sk_buff_data_t tail; sk_buff_data_t end; unsigned char *head; unsigned char *data; unsigned int truesize; atomic_t users; } ; 831 struct dst_entry ; 880 struct rtable ; 1402 struct dql { unsigned int num_queued; unsigned int adj_limit; unsigned int last_obj_cnt; unsigned int limit; unsigned int num_completed; unsigned int prev_ovlimit; unsigned int prev_num_queued; unsigned int prev_last_obj_cnt; unsigned int lowest_slack; unsigned long slack_start_time; unsigned int max_limit; unsigned int min_limit; unsigned int slack_hold_time; } ; 43 struct __anonstruct_sync_serial_settings_410 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; } ; 43 typedef struct __anonstruct_sync_serial_settings_410 sync_serial_settings; 50 struct __anonstruct_te1_settings_411 { unsigned int clock_rate; unsigned int clock_type; unsigned short loopback; unsigned int slot_map; } ; 50 typedef struct __anonstruct_te1_settings_411 te1_settings; 55 struct __anonstruct_raw_hdlc_proto_412 { unsigned short encoding; unsigned short parity; } ; 55 typedef struct __anonstruct_raw_hdlc_proto_412 raw_hdlc_proto; 65 struct __anonstruct_fr_proto_413 { unsigned int t391; unsigned int t392; 
unsigned int n391; unsigned int n392; unsigned int n393; unsigned short lmi; unsigned short dce; } ; 65 typedef struct __anonstruct_fr_proto_413 fr_proto; 69 struct __anonstruct_fr_proto_pvc_414 { unsigned int dlci; } ; 69 typedef struct __anonstruct_fr_proto_pvc_414 fr_proto_pvc; 74 struct __anonstruct_fr_proto_pvc_info_415 { unsigned int dlci; char master[16U]; } ; 74 typedef struct __anonstruct_fr_proto_pvc_info_415 fr_proto_pvc_info; 79 struct __anonstruct_cisco_proto_416 { unsigned int interval; unsigned int timeout; } ; 79 typedef struct __anonstruct_cisco_proto_416 cisco_proto; 117 struct ifmap { unsigned long mem_start; unsigned long mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; } ; 197 union __anonunion_ifs_ifsu_417 { raw_hdlc_proto *raw_hdlc; cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; fr_proto_pvc_info *fr_pvc_info; sync_serial_settings *sync; te1_settings *te1; } ; 197 struct if_settings { unsigned int type; unsigned int size; union __anonunion_ifs_ifsu_417 ifs_ifsu; } ; 216 union __anonunion_ifr_ifrn_418 { char ifrn_name[16U]; } ; 216 union __anonunion_ifr_ifru_419 { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; int ifru_ivalue; int ifru_mtu; struct ifmap ifru_map; char ifru_slave[16U]; char ifru_newname[16U]; void *ifru_data; struct if_settings ifru_settings; } ; 216 struct ifreq { union __anonunion_ifr_ifrn_418 ifr_ifrn; union __anonunion_ifr_ifru_419 ifr_ifru; } ; 18 typedef s32 compat_time_t; 39 typedef s32 compat_long_t; 45 typedef u32 compat_uptr_t; 46 struct compat_timespec { compat_time_t tv_sec; s32 tv_nsec; } ; 278 struct compat_robust_list { compat_uptr_t next; } ; 282 struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; } ; 39 struct ethtool_cmd { __u32 cmd; __u32 supported; __u32 advertising; 
__u16 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 transceiver; __u8 autoneg; __u8 mdio_support; __u32 maxtxpkt; __u32 maxrxpkt; __u16 speed_hi; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __u32 lp_advertising; __u32 reserved[2U]; } ; 131 struct ethtool_drvinfo { __u32 cmd; char driver[32U]; char version[32U]; char fw_version[32U]; char bus_info[32U]; char erom_version[32U]; char reserved2[12U]; __u32 n_priv_flags; __u32 n_stats; __u32 testinfo_len; __u32 eedump_len; __u32 regdump_len; } ; 195 struct ethtool_wolinfo { __u32 cmd; __u32 supported; __u32 wolopts; __u8 sopass[6U]; } ; 239 struct ethtool_tunable { __u32 cmd; __u32 id; __u32 type_id; __u32 len; void *data[0U]; } ; 251 struct ethtool_regs { __u32 cmd; __u32 version; __u32 len; __u8 data[0U]; } ; 273 struct ethtool_eeprom { __u32 cmd; __u32 magic; __u32 offset; __u32 len; __u8 data[0U]; } ; 299 struct ethtool_eee { __u32 cmd; __u32 supported; __u32 advertised; __u32 lp_advertised; __u32 eee_active; __u32 eee_enabled; __u32 tx_lpi_enabled; __u32 tx_lpi_timer; __u32 reserved[2U]; } ; 328 struct ethtool_modinfo { __u32 cmd; __u32 type; __u32 eeprom_len; __u32 reserved[8U]; } ; 345 struct ethtool_coalesce { __u32 cmd; __u32 rx_coalesce_usecs; __u32 rx_max_coalesced_frames; __u32 rx_coalesce_usecs_irq; __u32 rx_max_coalesced_frames_irq; __u32 tx_coalesce_usecs; __u32 tx_max_coalesced_frames; __u32 tx_coalesce_usecs_irq; __u32 tx_max_coalesced_frames_irq; __u32 stats_block_coalesce_usecs; __u32 use_adaptive_rx_coalesce; __u32 use_adaptive_tx_coalesce; __u32 pkt_rate_low; __u32 rx_coalesce_usecs_low; __u32 rx_max_coalesced_frames_low; __u32 tx_coalesce_usecs_low; __u32 tx_max_coalesced_frames_low; __u32 pkt_rate_high; __u32 rx_coalesce_usecs_high; __u32 rx_max_coalesced_frames_high; __u32 tx_coalesce_usecs_high; __u32 tx_max_coalesced_frames_high; __u32 rate_sample_interval; } ; 444 struct ethtool_ringparam { __u32 cmd; __u32 rx_max_pending; __u32 rx_mini_max_pending; __u32 rx_jumbo_max_pending; __u32 
tx_max_pending; __u32 rx_pending; __u32 rx_mini_pending; __u32 rx_jumbo_pending; __u32 tx_pending; } ; 481 struct ethtool_channels { __u32 cmd; __u32 max_rx; __u32 max_tx; __u32 max_other; __u32 max_combined; __u32 rx_count; __u32 tx_count; __u32 other_count; __u32 combined_count; } ; 509 struct ethtool_pauseparam { __u32 cmd; __u32 autoneg; __u32 rx_pause; __u32 tx_pause; } ; 613 struct ethtool_test { __u32 cmd; __u32 flags; __u32 reserved; __u32 len; __u64 data[0U]; } ; 645 struct ethtool_stats { __u32 cmd; __u32 n_stats; __u64 data[0U]; } ; 687 struct ethtool_tcpip4_spec { __be32 ip4src; __be32 ip4dst; __be16 psrc; __be16 pdst; __u8 tos; } ; 720 struct ethtool_ah_espip4_spec { __be32 ip4src; __be32 ip4dst; __be32 spi; __u8 tos; } ; 736 struct ethtool_usrip4_spec { __be32 ip4src; __be32 ip4dst; __be32 l4_4_bytes; __u8 tos; __u8 ip_ver; __u8 proto; } ; 756 struct ethtool_tcpip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be16 psrc; __be16 pdst; __u8 tclass; } ; 774 struct ethtool_ah_espip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 spi; __u8 tclass; } ; 790 struct ethtool_usrip6_spec { __be32 ip6src[4U]; __be32 ip6dst[4U]; __be32 l4_4_bytes; __u8 tclass; __u8 l4_proto; } ; 806 union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; struct ethtool_tcpip4_spec sctp_ip4_spec; struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; struct ethtool_tcpip6_spec tcp_ip6_spec; struct ethtool_tcpip6_spec udp_ip6_spec; struct ethtool_tcpip6_spec sctp_ip6_spec; struct ethtool_ah_espip6_spec ah_ip6_spec; struct ethtool_ah_espip6_spec esp_ip6_spec; struct ethtool_usrip6_spec usr_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52U]; } ; 823 struct ethtool_flow_ext { __u8 padding[2U]; unsigned char h_dest[6U]; __be16 vlan_etype; __be16 vlan_tci; __be32 data[2U]; } ; 842 struct ethtool_rx_flow_spec { __u32 flow_type; union ethtool_flow_union h_u; 
struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; __u64 ring_cookie; __u32 location; } ; 892 struct ethtool_rxnfc { __u32 cmd; __u32 flow_type; __u64 data; struct ethtool_rx_flow_spec fs; __u32 rule_cnt; __u32 rule_locs[0U]; } ; 1063 struct ethtool_flash { __u32 cmd; __u32 region; char data[128U]; } ; 1071 struct ethtool_dump { __u32 cmd; __u32 version; __u32 flag; __u32 len; __u8 data[0U]; } ; 1147 struct ethtool_ts_info { __u32 cmd; __u32 so_timestamping; __s32 phc_index; __u32 tx_types; __u32 tx_reserved[3U]; __u32 rx_filters; __u32 rx_reserved[3U]; } ; 1515 struct ethtool_link_settings { __u32 cmd; __u32 speed; __u8 duplex; __u8 port; __u8 phy_address; __u8 autoneg; __u8 mdio_support; __u8 eth_tp_mdix; __u8 eth_tp_mdix_ctrl; __s8 link_mode_masks_nwords; __u32 reserved[8U]; __u32 link_mode_masks[0U]; } ; 39 enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ; 97 struct __anonstruct_link_modes_439 { unsigned long supported[1U]; unsigned long advertising[1U]; unsigned long lp_advertising[1U]; } ; 97 struct ethtool_link_ksettings { struct ethtool_link_settings base; struct __anonstruct_link_modes_439 link_modes; } ; 158 struct ethtool_ops { int (*get_settings)(struct net_device *, struct ethtool_cmd *); int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32 ); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int 
(*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 , u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state ); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32 ); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8 ); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom 
*, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); int (*get_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*set_per_queue_coalesce)(struct net_device *, u32 , struct ethtool_coalesce *); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); } ; 375 struct prot_inuse ; 376 struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; struct prot_inuse *inuse; } ; 38 struct u64_stats_sync { } ; 160 struct ipstats_mib { u64 mibs[36U]; struct u64_stats_sync syncp; } ; 61 struct icmp_mib { unsigned long mibs[28U]; } ; 67 struct icmpmsg_mib { atomic_long_t mibs[512U]; } ; 72 struct icmpv6_mib { unsigned long mibs[6U]; } ; 79 struct icmpv6_mib_device { atomic_long_t mibs[6U]; } ; 83 struct icmpv6msg_mib { atomic_long_t mibs[512U]; } ; 89 struct icmpv6msg_mib_device { atomic_long_t mibs[512U]; } ; 93 struct tcp_mib { unsigned long mibs[16U]; } ; 100 struct udp_mib { unsigned long mibs[9U]; } ; 106 struct linux_mib { unsigned long mibs[117U]; } ; 112 struct linux_xfrm_mib { unsigned long mibs[29U]; } ; 118 struct proc_dir_entry ; 118 struct netns_mib { struct tcp_mib *tcp_statistics; struct ipstats_mib *ip_statistics; struct linux_mib *net_statistics; struct udp_mib *udp_statistics; struct udp_mib *udplite_statistics; struct icmp_mib *icmp_statistics; struct icmpmsg_mib *icmpmsg_statistics; struct proc_dir_entry *proc_net_devsnmp6; struct udp_mib *udp_stats_in6; struct udp_mib *udplite_stats_in6; struct ipstats_mib *ipv6_statistics; struct icmpv6_mib *icmpv6_statistics; struct icmpv6msg_mib *icmpv6msg_statistics; struct linux_xfrm_mib *xfrm_statistics; } ; 26 struct 
netns_unix { int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; } ; 12 struct netns_packet { struct mutex sklist_lock; struct hlist_head sklist; } ; 14 struct netns_frags { struct percpu_counter mem; int timeout; int high_thresh; int low_thresh; int max_dist; } ; 187 struct ipv4_devconf ; 188 struct fib_rules_ops ; 189 struct fib_table ; 190 struct local_ports { seqlock_t lock; int range[2U]; bool warned; } ; 24 struct ping_group_range { seqlock_t lock; kgid_t range[2U]; } ; 29 struct inet_peer_base ; 29 struct xt_table ; 29 struct netns_ipv4 { struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *xfrm4_hdr; struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; struct fib_rules_ops *rules_ops; bool fib_has_custom_rules; struct fib_table *fib_local; struct fib_table *fib_main; struct fib_table *fib_default; int fib_num_tclassid_users; struct hlist_head *fib_table_hash; bool fib_offload_disabled; struct sock *fibnl; struct sock **icmp_sk; struct sock *mc_autojoin_sk; struct inet_peer_base *peers; struct sock **tcp_sk; struct netns_frags frags; struct xt_table *iptable_filter; struct xt_table *iptable_mangle; struct xt_table *iptable_raw; struct xt_table *arptable_filter; struct xt_table *iptable_security; struct xt_table *nat_table; int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_tcp_ecn_fallback; int sysctl_ip_default_ttl; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_ip_nonlocal_bind; int sysctl_ip_dynaddr; int sysctl_ip_early_demux; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; int sysctl_tcp_l3mdev_accept; int sysctl_tcp_mtu_probing; int sysctl_tcp_base_mss; int 
sysctl_tcp_probe_threshold; u32 sysctl_tcp_probe_interval; int sysctl_tcp_keepalive_time; int sysctl_tcp_keepalive_probes; int sysctl_tcp_keepalive_intvl; int sysctl_tcp_syn_retries; int sysctl_tcp_synack_retries; int sysctl_tcp_syncookies; int sysctl_tcp_reordering; int sysctl_tcp_retries1; int sysctl_tcp_retries2; int sysctl_tcp_orphan_retries; int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; int sysctl_igmp_llm_reports; int sysctl_igmp_qrv; struct ping_group_range ping_group_range; atomic_t dev_addr_genid; unsigned long *sysctl_local_reserved_ports; struct list_head mr_tables; struct fib_rules_ops *mr_rules_ops; int sysctl_fib_multipath_use_neigh; atomic_t rt_genid; } ; 142 struct neighbour ; 142 struct dst_ops { unsigned short family; unsigned int gc_thresh; int (*gc)(struct dst_ops *); struct dst_entry * (*check)(struct dst_entry *, __u32 ); unsigned int (*default_advmss)(const struct dst_entry *); unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *, int); struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32 ); void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); int (*local_out)(struct net *, struct sock *, struct sk_buff *); struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries; } ; 73 struct netns_sysctl_ipv6 { struct ctl_table_header *hdr; struct ctl_table_header *route_hdr; struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; int ip6_rt_gc_timeout; int 
ip6_rt_gc_interval; int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; int flowlabel_consistency; int auto_flowlabels; int icmpv6_time; int anycast_src_echo_reply; int ip_nonlocal_bind; int fwmark_reflect; int idgen_retries; int idgen_delay; int flowlabel_state_ranges; } ; 40 struct rt6_info ; 40 struct rt6_statistics ; 40 struct fib6_table ; 40 struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct netns_frags frags; struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; struct xt_table *ip6table_raw; struct xt_table *ip6table_security; struct xt_table *ip6table_nat; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct list_head fib6_walkers; struct dst_ops ip6_dst_ops; rwlock_t fib6_walker_lock; spinlock_t fib6_gc_lock; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; struct sock **icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct sock *mc_autojoin_sk; struct list_head mr6_tables; struct fib_rules_ops *mr6_rules_ops; atomic_t dev_addr_genid; atomic_t fib6_sernum; } ; 89 struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; } ; 95 struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr; } ; 14 struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; } ; 20 struct sctp_mib ; 21 struct netns_sctp { struct sctp_mib *sctp_statistics; struct proc_dir_entry *proc_net_sctp; struct ctl_table_header *sysctl_header; struct sock *ctl_sock; struct list_head local_addr_list; struct list_head addr_waitq; struct timer_list addr_wq_timer; struct list_head 
auto_asconf_splist; spinlock_t addr_wq_lock; spinlock_t local_addr_lock; unsigned int rto_initial; unsigned int rto_min; unsigned int rto_max; int rto_alpha; int rto_beta; int max_burst; int cookie_preserve_enable; char *sctp_hmac_alg; unsigned int valid_cookie_life; unsigned int sack_timeout; unsigned int hb_interval; int max_retrans_association; int max_retrans_path; int max_retrans_init; int pf_retrans; int pf_enable; int sndbuf_policy; int rcvbuf_policy; int default_auto_asconf; int addip_enable; int addip_noauth; int prsctp_enable; int auth_enable; int scope_policy; int rwnd_upd_shift; unsigned long max_autoclose; } ; 141 struct netns_dccp { struct sock *v4_ctl_sk; struct sock *v6_ctl_sk; } ; 79 struct nf_logger ; 80 struct nf_queue_handler ; 81 struct netns_nf { struct proc_dir_entry *proc_netfilter; const struct nf_queue_handler *queue_handler; const struct nf_logger *nf_loggers[13U]; struct ctl_table_header *nf_log_dir_header; struct list_head hooks[13U][8U]; } ; 21 struct ebt_table ; 22 struct netns_xt { struct list_head tables[13U]; bool notrack_deprecated_warning; bool clusterip_deprecated_warning; struct ebt_table *broute_table; struct ebt_table *frame_filter; struct ebt_table *frame_nat; } ; 19 struct hlist_nulls_node ; 19 struct hlist_nulls_head { struct hlist_nulls_node *first; } ; 23 struct hlist_nulls_node { struct hlist_nulls_node *next; struct hlist_nulls_node **pprev; } ; 32 struct nf_proto_net { struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; struct ctl_table_header *ctl_compat_header; struct ctl_table *ctl_compat_table; unsigned int users; } ; 25 struct nf_generic_net { struct nf_proto_net pn; unsigned int timeout; } ; 30 struct nf_tcp_net { struct nf_proto_net pn; unsigned int timeouts[14U]; unsigned int tcp_loose; unsigned int tcp_be_liberal; unsigned int tcp_max_retrans; } ; 44 struct nf_udp_net { struct nf_proto_net pn; unsigned int timeouts[2U]; } ; 49 struct nf_icmp_net { struct nf_proto_net pn; unsigned int 
timeout; } ; 54 struct nf_ip_net { struct nf_generic_net generic; struct nf_tcp_net tcp; struct nf_udp_net udp; struct nf_icmp_net icmp; struct nf_icmp_net icmpv6; struct ctl_table_header *ctl_table_header; struct ctl_table *ctl_table; } ; 65 struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; } ; 72 struct ip_conntrack_stat ; 72 struct nf_ct_event_notifier ; 72 struct nf_exp_event_notifier ; 72 struct netns_ct { atomic_t count; unsigned int expect_count; struct delayed_work ecache_dwork; bool ecache_dwork_pending; struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; struct ctl_table_header *helper_sysctl_header; unsigned int sysctl_log_invalid; int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; struct ct_pcpu *pcpu_lists; struct ip_conntrack_stat *stat; struct nf_ct_event_notifier *nf_conntrack_event_cb; struct nf_exp_event_notifier *nf_expect_event_cb; struct nf_ip_net nf_ct_proto; unsigned int labels_used; u8 label_words; } ; 104 struct nft_af_info ; 105 struct netns_nftables { struct list_head af_info; struct list_head commit_list; struct nft_af_info *ipv4; struct nft_af_info *ipv6; struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; struct nft_af_info *netdev; unsigned int base_seq; u8 gencursor; } ; 486 struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; } ; 708 struct flow_cache_percpu { struct hlist_head *hash_table; int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; } ; 16 struct flow_cache { u32 hash_shift; struct flow_cache_percpu *percpu; struct notifier_block hotcpu_notifier; int low_watermark; int high_watermark; struct timer_list rnd_timer; } ; 
25 struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; u8 dbits4; u8 sbits4; u8 dbits6; u8 sbits6; } ; 21 struct xfrm_policy_hthresh { struct work_struct work; seqlock_t lock; u8 lbits4; u8 rbits4; u8 lbits6; u8 rbits6; } ; 30 struct netns_xfrm { struct list_head state_all; struct hlist_head *state_bydst; struct hlist_head *state_bysrc; struct hlist_head *state_byspi; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; struct hlist_head state_gc_list; struct work_struct state_gc_work; struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; struct hlist_head policy_inexact[3U]; struct xfrm_policy_hash policy_bydst[3U]; unsigned int policy_count[6U]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; struct sock *nlsk; struct sock *nlsk_stash; u32 sysctl_aevent_etime; u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; struct ctl_table_header *sysctl_hdr; struct dst_ops xfrm4_dst_ops; struct dst_ops xfrm6_dst_ops; spinlock_t xfrm_state_lock; rwlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; struct flow_cache flow_cache_global; atomic_t flow_cache_genid; struct list_head flow_cache_gc_list; atomic_t flow_cache_gc_count; spinlock_t flow_cache_gc_lock; struct work_struct flow_cache_gc_work; struct work_struct flow_cache_flush_work; struct mutex flow_flush_sem; } ; 89 struct mpls_route ; 90 struct netns_mpls { size_t platform_labels; struct mpls_route **platform_label; struct ctl_table_header *ctl; } ; 16 struct proc_ns_operations ; 17 struct ns_common { atomic_long_t stashed; const struct proc_ns_operations *ops; unsigned int inum; } ; 11 struct net_generic ; 12 struct netns_ipvs ; 13 struct net { atomic_t passive; atomic_t count; spinlock_t rules_mod_lock; atomic64_t cookie_gen; struct list_head list; struct list_head cleanup_list; struct list_head exit_list; struct user_namespace *user_ns; spinlock_t nsid_lock; struct idr 
netns_ids; struct ns_common ns; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; struct ctl_table_set sysctls; struct sock *rtnl; struct sock *genl_sock; struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; unsigned int dev_base_seq; int ifindex; unsigned int dev_unreg_count; struct list_head rules_ops; struct net_device *loopback_dev; struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; struct netns_ipv6 ipv6; struct netns_ieee802154_lowpan ieee802154_lowpan; struct netns_sctp sctp; struct netns_dccp dccp; struct netns_nf nf; struct netns_xt xt; struct netns_ct ct; struct netns_nftables nft; struct netns_nf_frag nf_frag; struct sock *nfnl; struct sock *nfnl_stash; struct list_head nfnl_acct_list; struct list_head nfct_timeout_list; struct sk_buff_head wext_nlevents; struct net_generic *gen; struct netns_xfrm xfrm; struct netns_ipvs *ipvs; struct netns_mpls mpls; struct sock *diag_nlsk; atomic_t fnhe_genid; } ; 247 struct __anonstruct_possible_net_t_454 { struct net *net; } ; 247 typedef struct __anonstruct_possible_net_t_454 possible_net_t; 13 typedef unsigned long kernel_ulong_t; 186 struct acpi_device_id { __u8 id[9U]; kernel_ulong_t driver_data; __u32 cls; __u32 cls_msk; } ; 229 struct of_device_id { char name[32U]; char type[32U]; char compatible[128U]; const void *data; } ; 674 enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_ACPI_DATA = 3, FWNODE_PDATA = 4, FWNODE_IRQCHIP = 5 } ; 683 struct fwnode_handle { enum fwnode_type type; struct fwnode_handle *secondary; } ; 32 typedef u32 phandle; 34 struct property { char *name; int length; void *value; struct property *next; unsigned long _flags; unsigned int unique_id; struct bin_attribute attr; } ; 44 struct device_node { const char *name; const char *type; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property 
*properties; struct property *deadprops; struct device_node *parent; struct device_node *child; struct device_node *sibling; struct kobject kobj; unsigned long _flags; void *data; } ; 296 struct mii_bus ; 303 struct mdio_device { struct device dev; const struct dev_pm_ops *pm_ops; struct mii_bus *bus; int (*bus_match)(struct device *, struct device_driver *); void (*device_free)(struct mdio_device *); void (*device_remove)(struct mdio_device *); int addr; int flags; } ; 41 struct mdio_driver_common { struct device_driver driver; int flags; } ; 244 struct phy_device ; 245 enum ldv_30622 { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_MII = 1, PHY_INTERFACE_MODE_GMII = 2, PHY_INTERFACE_MODE_SGMII = 3, PHY_INTERFACE_MODE_TBI = 4, PHY_INTERFACE_MODE_REVMII = 5, PHY_INTERFACE_MODE_RMII = 6, PHY_INTERFACE_MODE_RGMII = 7, PHY_INTERFACE_MODE_RGMII_ID = 8, PHY_INTERFACE_MODE_RGMII_RXID = 9, PHY_INTERFACE_MODE_RGMII_TXID = 10, PHY_INTERFACE_MODE_RTBI = 11, PHY_INTERFACE_MODE_SMII = 12, PHY_INTERFACE_MODE_XGMII = 13, PHY_INTERFACE_MODE_MOCA = 14, PHY_INTERFACE_MODE_QSGMII = 15, PHY_INTERFACE_MODE_MAX = 16 } ; 84 typedef enum ldv_30622 phy_interface_t; 130 enum ldv_30673 { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4 } ; 137 struct mii_bus { struct module *owner; const char *name; char id[17U]; void *priv; int (*read)(struct mii_bus *, int, int); int (*write)(struct mii_bus *, int, int, u16 ); int (*reset)(struct mii_bus *); struct mutex mdio_lock; struct device *parent; enum ldv_30673 state; struct device dev; struct mdio_device *mdio_map[32U]; u32 phy_mask; u32 phy_ignore_ta_mask; int irq[32U]; } ; 218 enum phy_state { PHY_DOWN = 0, PHY_STARTING = 1, PHY_READY = 2, PHY_PENDING = 3, PHY_UP = 4, PHY_AN = 5, PHY_RUNNING = 6, PHY_NOLINK = 7, PHY_FORCING = 8, PHY_CHANGELINK = 9, PHY_HALTED = 10, PHY_RESUMING = 11 } ; 233 struct phy_c45_device_ids { u32 devices_in_package; u32 device_ids[8U]; } ; 326 struct phy_driver ; 326 struct 
phy_device { struct mdio_device mdio; struct phy_driver *drv; u32 phy_id; struct phy_c45_device_ids c45_ids; bool is_c45; bool is_internal; bool is_pseudo_fixed_link; bool has_fixups; bool suspended; enum phy_state state; u32 dev_flags; phy_interface_t interface; int speed; int duplex; int pause; int asym_pause; int link; u32 interrupts; u32 supported; u32 advertising; u32 lp_advertising; int autoneg; int link_timeout; int irq; void *priv; struct work_struct phy_queue; struct delayed_work state_queue; atomic_t irq_disable; struct mutex lock; struct net_device *attached_dev; u8 mdix; void (*adjust_link)(struct net_device *); } ; 428 struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; unsigned int phy_id_mask; u32 features; u32 flags; const void *driver_data; int (*soft_reset)(struct phy_device *); int (*config_init)(struct phy_device *); int (*probe)(struct phy_device *); int (*suspend)(struct phy_device *); int (*resume)(struct phy_device *); int (*config_aneg)(struct phy_device *); int (*aneg_done)(struct phy_device *); int (*read_status)(struct phy_device *); int (*ack_interrupt)(struct phy_device *); int (*config_intr)(struct phy_device *); int (*did_interrupt)(struct phy_device *); void (*remove)(struct phy_device *); int (*match_phy_device)(struct phy_device *); int (*ts_info)(struct phy_device *, struct ethtool_ts_info *); int (*hwtstamp)(struct phy_device *, struct ifreq *); bool (*rxtstamp)(struct phy_device *, struct sk_buff *, int); void (*txtstamp)(struct phy_device *, struct sk_buff *, int); int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); void (*link_change_notify)(struct phy_device *); int (*read_mmd_indirect)(struct phy_device *, int, int, int); void (*write_mmd_indirect)(struct phy_device *, int, int, int, u32 ); int (*module_info)(struct phy_device *, struct ethtool_modinfo *); int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, 
u8 *); int (*get_sset_count)(struct phy_device *); void (*get_strings)(struct phy_device *, u8 *); void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); } ; 841 struct fixed_phy_status { int link; int speed; int duplex; int pause; int asym_pause; } ; 27 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA = 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4, DSA_TAG_LAST = 5 } ; 36 struct dsa_chip_data { struct device *host_dev; int sw_addr; int eeprom_len; struct device_node *of_node; char *port_names[12U]; struct device_node *port_dn[12U]; s8 rtable[4U]; } ; 70 struct dsa_platform_data { struct device *netdev; struct net_device *of_netdev; int nr_chips; struct dsa_chip_data *chip; } ; 86 struct packet_type ; 87 struct dsa_switch ; 87 struct dsa_device_ops ; 87 struct dsa_switch_tree { struct list_head list; u32 tree; struct kref refcount; bool applied; struct dsa_platform_data *pd; struct net_device *master_netdev; int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); struct ethtool_ops master_ethtool_ops; const struct ethtool_ops *master_orig_ethtool_ops; s8 cpu_switch; s8 cpu_port; struct dsa_switch *ds[4U]; const struct dsa_device_ops *tag_ops; } ; 140 struct dsa_port { struct net_device *netdev; struct device_node *dn; unsigned int ageing_time; } ; 146 struct dsa_switch_driver ; 146 struct dsa_switch { struct device *dev; struct dsa_switch_tree *dst; int index; void *priv; struct dsa_chip_data *cd; struct dsa_switch_driver *drv; s8 rtable[4U]; char hwmon_name[24U]; struct device *hwmon_dev; struct net_device *master_netdev; u32 dsa_port_mask; u32 cpu_port_mask; u32 enabled_port_mask; u32 phys_mii_mask; struct dsa_port ports[12U]; struct mii_bus *slave_mii_bus; } ; 233 struct switchdev_trans ; 234 struct switchdev_obj ; 235 struct switchdev_obj_port_fdb ; 236 struct switchdev_obj_port_vlan ; 237 struct dsa_switch_driver { struct list_head list; enum dsa_tag_protocol 
tag_protocol; const char * (*probe)(struct device *, struct device *, int, void **); int (*setup)(struct dsa_switch *); int (*set_addr)(struct dsa_switch *, u8 *); u32 (*get_phy_flags)(struct dsa_switch *, int); int (*phy_read)(struct dsa_switch *, int, int); int (*phy_write)(struct dsa_switch *, int, int, u16 ); void (*adjust_link)(struct dsa_switch *, int, struct phy_device *); void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status *); void (*get_strings)(struct dsa_switch *, int, uint8_t *); void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); int (*get_sset_count)(struct dsa_switch *); void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); int (*suspend)(struct dsa_switch *); int (*resume)(struct dsa_switch *); int (*port_enable)(struct dsa_switch *, int, struct phy_device *); void (*port_disable)(struct dsa_switch *, int, struct phy_device *); int (*set_eee)(struct dsa_switch *, int, struct phy_device *, struct ethtool_eee *); int (*get_eee)(struct dsa_switch *, int, struct ethtool_eee *); int (*get_temp)(struct dsa_switch *, int *); int (*get_temp_limit)(struct dsa_switch *, int *); int (*set_temp_limit)(struct dsa_switch *, int); int (*get_temp_alarm)(struct dsa_switch *, bool *); int (*get_eeprom_len)(struct dsa_switch *); int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); int (*get_regs_len)(struct dsa_switch *, int); void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *); int (*set_ageing_time)(struct dsa_switch *, unsigned int); int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *); void (*port_bridge_leave)(struct dsa_switch *, int); void (*port_stp_state_set)(struct dsa_switch *, int, u8 ); int (*port_vlan_filtering)(struct dsa_switch *, int, bool ); int (*port_vlan_prepare)(struct dsa_switch *, int, const struct 
switchdev_obj_port_vlan *, struct switchdev_trans *); void (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct switchdev_trans *); int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *); int (*port_vlan_dump)(struct dsa_switch *, int, struct switchdev_obj_port_vlan *, int (*)(struct switchdev_obj *)); int (*port_fdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); void (*port_fdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *, struct switchdev_trans *); int (*port_fdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_fdb *); int (*port_fdb_dump)(struct dsa_switch *, int, struct switchdev_obj_port_fdb *, int (*)(struct switchdev_obj *)); } ; 389 struct ieee_ets { __u8 willing; __u8 ets_cap; __u8 cbs; __u8 tc_tx_bw[8U]; __u8 tc_rx_bw[8U]; __u8 tc_tsa[8U]; __u8 prio_tc[8U]; __u8 tc_reco_bw[8U]; __u8 tc_reco_tsa[8U]; __u8 reco_prio_tc[8U]; } ; 69 struct ieee_maxrate { __u64 tc_maxrate[8U]; } ; 87 struct ieee_qcn { __u8 rpg_enable[8U]; __u32 rppp_max_rps[8U]; __u32 rpg_time_reset[8U]; __u32 rpg_byte_reset[8U]; __u32 rpg_threshold[8U]; __u32 rpg_max_rate[8U]; __u32 rpg_ai_rate[8U]; __u32 rpg_hai_rate[8U]; __u32 rpg_gd[8U]; __u32 rpg_min_dec_fac[8U]; __u32 rpg_min_rate[8U]; __u32 cndd_state_machine[8U]; } ; 132 struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U]; __u32 rppp_created_rps[8U]; } ; 144 struct ieee_pfc { __u8 pfc_cap; __u8 pfc_en; __u8 mbc; __u16 delay; __u64 requests[8U]; __u64 indications[8U]; } ; 164 struct cee_pg { __u8 willing; __u8 error; __u8 pg_en; __u8 tcs_supported; __u8 pg_bw[8U]; __u8 prio_pg[8U]; } ; 187 struct cee_pfc { __u8 willing; __u8 error; __u8 pfc_en; __u8 tcs_supported; } ; 202 struct dcb_app { __u8 selector; __u8 priority; __u16 protocol; } ; 236 struct dcb_peer_app_info { __u8 willing; __u8 error; } ; 40 struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device *, struct 
ieee_ets *); int (*ieee_setets)(struct net_device *, struct ieee_ets *); int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *); int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *); int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); int (*ieee_getapp)(struct net_device *, struct dcb_app *); int (*ieee_setapp)(struct net_device *, struct dcb_app *); int (*ieee_delapp)(struct net_device *, struct dcb_app *); int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); u8 (*getstate)(struct net_device *); u8 (*setstate)(struct net_device *, u8 ); void (*getpermhwaddr)(struct net_device *, u8 *); void (*setpgtccfgtx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgtx)(struct net_device *, int, u8 ); void (*setpgtccfgrx)(struct net_device *, int, u8 , u8 , u8 , u8 ); void (*setpgbwgcfgrx)(struct net_device *, int, u8 ); void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); void (*setpfccfg)(struct net_device *, int, u8 ); void (*getpfccfg)(struct net_device *, int, u8 *); u8 (*setall)(struct net_device *); u8 (*getcap)(struct net_device *, int, u8 *); int (*getnumtcs)(struct net_device *, int, u8 *); int (*setnumtcs)(struct net_device *, int, u8 ); u8 (*getpfcstate)(struct net_device *); void (*setpfcstate)(struct net_device *, u8 ); void (*getbcncfg)(struct net_device *, int, u32 *); void (*setbcncfg)(struct net_device *, int, u32 ); void (*getbcnrp)(struct net_device *, int, u8 *); void (*setbcnrp)(struct 
net_device *, int, u8 ); int (*setapp)(struct net_device *, u8 , u16 , u8 ); int (*getapp)(struct net_device *, u8 , u16 ); u8 (*getfeatcfg)(struct net_device *, int, u8 *); u8 (*setfeatcfg)(struct net_device *, int, u8 ); u8 (*getdcbx)(struct net_device *); u8 (*setdcbx)(struct net_device *, u8 ); int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); int (*peer_getapptable)(struct net_device *, struct dcb_app *); int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); } ; 105 struct taskstats { __u16 version; __u32 ac_exitcode; __u8 ac_flag; __u8 ac_nice; __u64 cpu_count; __u64 cpu_delay_total; __u64 blkio_count; __u64 blkio_delay_total; __u64 swapin_count; __u64 swapin_delay_total; __u64 cpu_run_real_total; __u64 cpu_run_virtual_total; char ac_comm[32U]; __u8 ac_sched; __u8 ac_pad[3U]; __u32 ac_uid; __u32 ac_gid; __u32 ac_pid; __u32 ac_ppid; __u32 ac_btime; __u64 ac_etime; __u64 ac_utime; __u64 ac_stime; __u64 ac_minflt; __u64 ac_majflt; __u64 coremem; __u64 virtmem; __u64 hiwater_rss; __u64 hiwater_vm; __u64 read_char; __u64 write_char; __u64 read_syscalls; __u64 write_syscalls; __u64 read_bytes; __u64 write_bytes; __u64 cancelled_write_bytes; __u64 nvcsw; __u64 nivcsw; __u64 ac_utimescaled; __u64 ac_stimescaled; __u64 cpu_scaled_run_real_total; __u64 freepages_count; __u64 freepages_delay_total; } ; 58 struct mnt_namespace ; 59 struct ipc_namespace ; 60 struct cgroup_namespace ; 61 struct nsproxy { atomic_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; struct cgroup_namespace *cgroup_ns; } ; 86 struct uid_gid_extent { u32 first; u32 lower_first; u32 count; } ; 19 struct uid_gid_map { u32 nr_extents; struct uid_gid_extent extent[5U]; } ; 20 struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; atomic_t 
count; struct user_namespace *parent; int level; kuid_t owner; kgid_t group; struct ns_common ns; unsigned long flags; struct key *persistent_keyring_register; struct rw_semaphore persistent_keyring_register_sem; } ; 609 struct cgroup_namespace { atomic_t count; struct ns_common ns; struct user_namespace *user_ns; struct css_set *root_cset; } ; 663 struct netprio_map { struct callback_head rcu; u32 priomap_len; u32 priomap[]; } ; 99 struct xfrm_policy ; 100 struct xfrm_state ; 116 struct request_sock ; 41 struct nlmsghdr { __u32 nlmsg_len; __u16 nlmsg_type; __u16 nlmsg_flags; __u32 nlmsg_seq; __u32 nlmsg_pid; } ; 143 struct nlattr { __u16 nla_len; __u16 nla_type; } ; 105 struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; struct module *module; u16 family; u16 min_dump_alloc; unsigned int prev_seq; unsigned int seq; long args[6U]; } ; 183 struct ndmsg { __u8 ndm_family; __u8 ndm_pad1; __u16 ndm_pad2; __s32 ndm_ifindex; __u16 ndm_state; __u8 ndm_flags; __u8 ndm_type; } ; 41 struct rtnl_link_stats64 { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 rx_errors; __u64 tx_errors; __u64 rx_dropped; __u64 tx_dropped; __u64 multicast; __u64 collisions; __u64 rx_length_errors; __u64 rx_over_errors; __u64 rx_crc_errors; __u64 rx_frame_errors; __u64 rx_fifo_errors; __u64 rx_missed_errors; __u64 tx_aborted_errors; __u64 tx_carrier_errors; __u64 tx_fifo_errors; __u64 tx_heartbeat_errors; __u64 tx_window_errors; __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; } ; 840 struct ifla_vf_stats { __u64 rx_packets; __u64 tx_packets; __u64 rx_bytes; __u64 tx_bytes; __u64 broadcast; __u64 multicast; } ; 16 struct ifla_vf_info { __u32 vf; __u8 mac[32U]; __u32 vlan; __u32 qos; __u32 spoofchk; __u32 linkstate; __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; __u32 trusted; 
} ; 118 struct tc_stats { __u64 bytes; __u32 packets; __u32 drops; __u32 overlimits; __u32 bps; __u32 pps; __u32 qlen; __u32 backlog; } ; 96 struct tc_sizespec { unsigned char cell_log; unsigned char size_log; short cell_align; int overhead; unsigned int linklayer; unsigned int mpu; unsigned int mtu; unsigned int tsize; } ; 486 struct netpoll_info ; 487 struct wireless_dev ; 488 struct wpan_dev ; 489 struct mpls_dev ; 490 struct udp_tunnel_info ; 491 struct bpf_prog ; 69 enum netdev_tx { __NETDEV_TX_MIN = -2147483648, NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16 } ; 112 typedef enum netdev_tx netdev_tx_t; 131 struct net_device_stats { unsigned long rx_packets; unsigned long tx_packets; unsigned long rx_bytes; unsigned long tx_bytes; unsigned long rx_errors; unsigned long tx_errors; unsigned long rx_dropped; unsigned long tx_dropped; unsigned long multicast; unsigned long collisions; unsigned long rx_length_errors; unsigned long rx_over_errors; unsigned long rx_crc_errors; unsigned long rx_frame_errors; unsigned long rx_fifo_errors; unsigned long rx_missed_errors; unsigned long tx_aborted_errors; unsigned long tx_carrier_errors; unsigned long tx_fifo_errors; unsigned long tx_heartbeat_errors; unsigned long tx_window_errors; unsigned long rx_compressed; unsigned long tx_compressed; } ; 194 struct neigh_parms ; 215 struct netdev_hw_addr_list { struct list_head list; int count; } ; 220 struct hh_cache { u16 hh_len; u16 __pad; seqlock_t hh_lock; unsigned long hh_data[16U]; } ; 249 struct header_ops { int (*create)(struct sk_buff *, struct net_device *, unsigned short, const void *, const void *, unsigned int); int (*parse)(const struct sk_buff *, unsigned char *); int (*cache)(const struct neighbour *, struct hh_cache *, __be16 ); void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); bool (*validate)(const char *, unsigned int); } ; 300 struct napi_struct { struct list_head poll_list; unsigned long state; int weight; unsigned int gro_count; 
int (*poll)(struct napi_struct *, int); spinlock_t poll_lock; int poll_owner; struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; } ; 346 enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ; 394 typedef enum rx_handler_result rx_handler_result_t; 395 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); 540 struct Qdisc ; 540 struct netdev_queue { struct net_device *dev; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; struct kobject kobj; int numa_node; unsigned long tx_maxrate; unsigned long trans_timeout; spinlock_t _xmit_lock; int xmit_lock_owner; unsigned long trans_start; unsigned long state; struct dql dql; } ; 611 struct rps_map { unsigned int len; struct callback_head rcu; u16 cpus[0U]; } ; 623 struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; } ; 635 struct rps_dev_flow_table { unsigned int mask; struct callback_head rcu; struct rps_dev_flow flows[0U]; } ; 687 struct netdev_rx_queue { struct rps_map *rps_map; struct rps_dev_flow_table *rps_flow_table; struct kobject kobj; struct net_device *dev; } ; 710 struct xps_map { unsigned int len; unsigned int alloc_len; struct callback_head rcu; u16 queues[0U]; } ; 723 struct xps_dev_maps { struct callback_head rcu; struct xps_map *cpu_map[0U]; } ; 734 struct netdev_tc_txq { u16 count; u16 offset; } ; 745 struct netdev_fcoe_hbainfo { char manufacturer[64U]; char serial_number[64U]; char hardware_version[64U]; char driver_version[64U]; char optionrom_version[64U]; char firmware_version[64U]; char model[256U]; char model_description[256U]; } ; 761 struct netdev_phys_item_id { unsigned char id[32U]; unsigned char id_len; } ; 788 struct tc_cls_u32_offload ; 789 struct tc_cls_flower_offload ; 789 struct tc_cls_matchall_offload ; 789 union __anonunion____missing_field_name_470 { u8 tc; struct 
tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; struct tc_cls_matchall_offload *cls_mall; } ; 789 struct tc_to_netdev { unsigned int type; union __anonunion____missing_field_name_470 __annonCompField106; } ; 804 enum xdp_netdev_command { XDP_SETUP_PROG = 0, XDP_QUERY_PROG = 1 } ; 809 union __anonunion____missing_field_name_471 { struct bpf_prog *prog; bool prog_attached; } ; 809 struct netdev_xdp { enum xdp_netdev_command command; union __anonunion____missing_field_name_471 __annonCompField107; } ; 832 struct net_device_ops { int (*ndo_init)(struct net_device *); void (*ndo_uninit)(struct net_device *); int (*ndo_open)(struct net_device *); int (*ndo_stop)(struct net_device *); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t ); u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *, u16 (*)(struct net_device *, struct sk_buff *)); void (*ndo_change_rx_flags)(struct net_device *, int); void (*ndo_set_rx_mode)(struct net_device *); int (*ndo_set_mac_address)(struct net_device *, void *); int (*ndo_validate_addr)(struct net_device *); int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); int (*ndo_set_config)(struct net_device *, struct ifmap *); int (*ndo_change_mtu)(struct net_device *, int); int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); void (*ndo_tx_timeout)(struct net_device *); struct rtnl_link_stats64 * (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); struct net_device_stats * (*ndo_get_stats)(struct net_device *); int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16 , u16 ); int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16 , u16 ); void (*ndo_poll_controller)(struct net_device *); int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); void (*ndo_netpoll_cleanup)(struct net_device *); int (*ndo_busy_poll)(struct napi_struct *); int 
(*ndo_set_vf_mac)(struct net_device *, int, u8 *); int (*ndo_set_vf_vlan)(struct net_device *, int, u16 , u8 ); int (*ndo_set_vf_rate)(struct net_device *, int, int, int); int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool ); int (*ndo_set_vf_trust)(struct net_device *, int, bool ); int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); int (*ndo_set_vf_link_state)(struct net_device *, int, int); int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); int (*ndo_set_vf_guid)(struct net_device *, int, u64 , int); int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool ); int (*ndo_setup_tc)(struct net_device *, u32 , __be16 , struct tc_to_netdev *); int (*ndo_fcoe_enable)(struct net_device *); int (*ndo_fcoe_disable)(struct net_device *); int (*ndo_fcoe_ddp_setup)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_ddp_done)(struct net_device *, u16 ); int (*ndo_fcoe_ddp_target)(struct net_device *, u16 , struct scatterlist *, unsigned int); int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16 , u32 ); int (*ndo_add_slave)(struct net_device *, struct net_device *); int (*ndo_del_slave)(struct net_device *, struct net_device *); netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t ); int (*ndo_set_features)(struct net_device *, netdev_features_t ); int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16 , u16 ); int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned 
char *, u16 ); int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int); int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_bridge_getlink)(struct sk_buff *, u32 , u32 , struct net_device *, u32 , int); int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16 ); int (*ndo_change_carrier)(struct net_device *, bool ); int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t ); void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *); void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *); void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); void (*ndo_dfwd_del_station)(struct net_device *, void *); netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *, struct net_device *, void *); int (*ndo_get_lock_subclass)(struct net_device *); int (*ndo_set_tx_maxrate)(struct net_device *, int, u32 ); int (*ndo_get_iflink)(const struct net_device *); int (*ndo_change_proto_down)(struct net_device *, bool ); int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); void (*ndo_set_rx_headroom)(struct net_device *, int); int (*ndo_xdp)(struct net_device *, struct netdev_xdp *); } ; 1354 struct __anonstruct_adj_list_472 { struct list_head upper; struct list_head lower; } ; 1354 struct __anonstruct_all_adj_list_473 { struct list_head upper; struct list_head lower; } ; 1354 struct iw_handler_def ; 1354 struct iw_public_data ; 1354 struct switchdev_ops ; 1354 struct l3mdev_ops ; 1354 struct ndisc_ops ; 1354 struct vlan_info ; 1354 struct tipc_bearer ; 1354 struct in_device ; 1354 struct dn_dev ; 1354 struct inet6_dev ; 1354 struct tcf_proto ; 1354 struct cpu_rmap ; 1354 struct pcpu_lstats ; 1354 struct pcpu_sw_netstats ; 1354 struct pcpu_dstats ; 1354 struct pcpu_vstats ; 1354 union __anonunion____missing_field_name_474 { void 
*ml_priv; struct pcpu_lstats *lstats; struct pcpu_sw_netstats *tstats; struct pcpu_dstats *dstats; struct pcpu_vstats *vstats; } ; 1354 struct garp_port ; 1354 struct mrp_port ; 1354 struct rtnl_link_ops ; 1354 struct net_device { char name[16U]; struct hlist_node name_hlist; char *ifalias; unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; atomic_t carrier_changes; unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct list_head ptype_all; struct list_head ptype_specific; struct __anonstruct_adj_list_472 adj_list; struct __anonstruct_all_adj_list_473 all_adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; netdev_features_t gso_partial_features; int ifindex; int group; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_long_t rx_nohandler; const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct switchdev_ops *switchdev_ops; const struct l3mdev_ops *l3mdev_ops; const struct ndisc_ops *ndisc_ops; const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; unsigned int mtu; unsigned short type; unsigned short hard_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; unsigned char perm_addr[32U]; unsigned char addr_assign_type; unsigned char addr_len; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; unsigned char name_assign_type; bool uc_promisc; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; 
struct netdev_hw_addr_list dev_addrs; struct kset *queues_kset; unsigned int promiscuity; unsigned int allmulti; struct vlan_info *vlan_info; struct dsa_switch_tree *dsa_ptr; struct tipc_bearer *tipc_ptr; void *atalk_ptr; struct in_device *ip_ptr; struct dn_dev *dn_ptr; struct inet6_dev *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; struct mpls_dev *mpls_ptr; unsigned long last_rx; unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; unsigned long gro_flush_timeout; rx_handler_func_t *rx_handler; void *rx_handler_data; struct tcf_proto *ingress_cl_list; struct netdev_queue *ingress_queue; struct list_head nf_hooks_ingress; unsigned char broadcast[32U]; struct cpu_rmap *rx_cpu_rmap; struct hlist_node index_hlist; struct netdev_queue *_tx; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc *qdisc; unsigned long tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; struct xps_dev_maps *xps_maps; struct tcf_proto *egress_cl_list; u32 offload_fwd_mark; struct timer_list watchdog_timer; int *pcpu_refcnt; struct list_head todo_list; struct list_head link_watch_list; unsigned char reg_state; bool dismantle; unsigned short rtnl_link_state; void (*destructor)(struct net_device *); struct netpoll_info *npinfo; possible_net_t nd_net; union __anonunion____missing_field_name_474 __annonCompField108; struct garp_port *garp_port; struct mrp_port *mrp_port; struct device dev; const struct attribute_group *sysfs_groups[4U]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; unsigned int gso_max_size; u16 gso_max_segs; const struct dcbnl_rtnl_ops *dcbnl_ops; u8 num_tc; struct netdev_tc_txq tc_to_txq[16U]; u8 prio_tc_map[16U]; unsigned int fcoe_ddp_xid; struct netprio_map *priomap; struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; struct lock_class_key *qdisc_running_key; bool 
proto_down; } ; 2165 struct packet_type { __be16 type; struct net_device *dev; int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); bool (*id_match)(struct packet_type *, struct sock *); void *af_packet_priv; struct list_head list; } ; 2195 struct pcpu_sw_netstats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; } ; 103 struct page_counter { atomic_long_t count; unsigned long limit; struct page_counter *parent; unsigned long watermark; unsigned long failcnt; } ; 33 struct eventfd_ctx ; 41 struct vmpressure { unsigned long scanned; unsigned long reclaimed; unsigned long tree_scanned; unsigned long tree_reclaimed; struct spinlock sr_lock; struct list_head events; struct mutex events_lock; struct work_struct work; } ; 44 struct fprop_global { struct percpu_counter events; unsigned int period; seqcount_t sequence; } ; 72 struct fprop_local_percpu { struct percpu_counter events; unsigned int period; raw_spinlock_t lock; } ; 32 typedef int congested_fn(void *, int); 41 struct bdi_writeback_congested { unsigned long state; atomic_t refcnt; struct backing_dev_info *bdi; int blkcg_id; struct rb_node rb_node; } ; 60 union __anonunion____missing_field_name_479 { struct work_struct release_work; struct callback_head rcu; } ; 60 struct bdi_writeback { struct backing_dev_info *bdi; unsigned long state; unsigned long last_old_flush; struct list_head b_dirty; struct list_head b_io; struct list_head b_more_io; struct list_head b_dirty_time; spinlock_t list_lock; struct percpu_counter stat[4U]; struct bdi_writeback_congested *congested; unsigned long bw_time_stamp; unsigned long dirtied_stamp; unsigned long written_stamp; unsigned long write_bandwidth; unsigned long avg_write_bandwidth; unsigned long dirty_ratelimit; unsigned long balanced_dirty_ratelimit; struct fprop_local_percpu completions; int dirty_exceeded; spinlock_t work_lock; struct list_head work_list; struct delayed_work dwork; struct 
list_head bdi_node; struct percpu_ref refcnt; struct fprop_local_percpu memcg_completions; struct cgroup_subsys_state *memcg_css; struct cgroup_subsys_state *blkcg_css; struct list_head memcg_node; struct list_head blkcg_node; union __anonunion____missing_field_name_479 __annonCompField109; } ; 134 struct backing_dev_info { struct list_head bdi_list; unsigned long ra_pages; unsigned int capabilities; congested_fn *congested_fn; void *congested_data; char *name; unsigned int min_ratio; unsigned int max_ratio; unsigned int max_prop_frac; atomic_long_t tot_write_bandwidth; struct bdi_writeback wb; struct list_head wb_list; struct radix_tree_root cgwb_tree; struct rb_root cgwb_congested_tree; atomic_t usage_cnt; wait_queue_head_t wb_waitq; struct device *dev; struct device *owner; struct timer_list laptop_mode_wb_timer; struct dentry *debug_dir; struct dentry *debug_stats; } ; 14 enum writeback_sync_modes { WB_SYNC_NONE = 0, WB_SYNC_ALL = 1 } ; 31 struct writeback_control { long nr_to_write; long pages_skipped; loff_t range_start; loff_t range_end; enum writeback_sync_modes sync_mode; unsigned char for_kupdate; unsigned char for_background; unsigned char tagged_writepages; unsigned char for_reclaim; unsigned char range_cyclic; unsigned char for_sync; struct bdi_writeback *wb; struct inode *inode; int wb_id; int wb_lcand_id; int wb_tcand_id; size_t wb_bytes; size_t wb_lcand_bytes; size_t wb_tcand_bytes; } ; 101 struct wb_domain { spinlock_t lock; struct fprop_global completions; struct timer_list period_timer; unsigned long period_time; unsigned long dirty_limit_tstamp; unsigned long dirty_limit; } ; 12 typedef void * mempool_alloc_t(gfp_t , void *); 13 typedef void mempool_free_t(void *, void *); 14 struct mempool_s { spinlock_t lock; int min_nr; int curr_nr; void **elements; void *pool_data; mempool_alloc_t *alloc; mempool_free_t *free; wait_queue_head_t wait; } ; 25 typedef struct mempool_s mempool_t; 79 union __anonunion____missing_field_name_480 { struct list_head 
q_node; struct kmem_cache *__rcu_icq_cache; } ; 79 union __anonunion____missing_field_name_481 { struct hlist_node ioc_node; struct callback_head __rcu_head; } ; 79 struct io_cq { struct request_queue *q; struct io_context *ioc; union __anonunion____missing_field_name_480 __annonCompField110; union __anonunion____missing_field_name_481 __annonCompField111; unsigned int flags; } ; 92 struct io_context { atomic_long_t refcount; atomic_t active_ref; atomic_t nr_tasks; spinlock_t lock; unsigned short ioprio; int nr_batch_requests; unsigned long last_waited; struct radix_tree_root icq_tree; struct io_cq *icq_hint; struct hlist_head icq_list; struct work_struct release_work; } ; 295 struct bio_integrity_payload { struct bio *bip_bio; struct bvec_iter bip_iter; bio_end_io_t *bip_end_io; unsigned short bip_slab; unsigned short bip_vcnt; unsigned short bip_max_vcnt; unsigned short bip_flags; struct work_struct bip_work; struct bio_vec *bip_vec; struct bio_vec bip_inline_vecs[0U]; } ; 529 struct bio_list { struct bio *head; struct bio *tail; } ; 661 struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; mempool_t *bio_pool; mempool_t *bvec_pool; mempool_t *bio_integrity_pool; mempool_t *bvec_integrity_pool; spinlock_t rescue_lock; struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; } ; 87 struct mem_cgroup_id { int id; atomic_t ref; } ; 104 struct mem_cgroup_stat_cpu { long count[11U]; unsigned long events[8U]; unsigned long nr_page_events; unsigned long targets[3U]; } ; 111 struct mem_cgroup_reclaim_iter { struct mem_cgroup *position; unsigned int generation; } ; 117 struct mem_cgroup_per_node { struct lruvec lruvec; unsigned long lru_size[5U]; struct mem_cgroup_reclaim_iter iter[13U]; struct rb_node tree_node; unsigned long usage_in_excess; bool on_tree; struct mem_cgroup *memcg; } ; 133 struct mem_cgroup_threshold { struct eventfd_ctx *eventfd; unsigned long threshold; } ; 139 struct 
mem_cgroup_threshold_ary { int current_threshold; unsigned int size; struct mem_cgroup_threshold entries[0U]; } ; 149 struct mem_cgroup_thresholds { struct mem_cgroup_threshold_ary *primary; struct mem_cgroup_threshold_ary *spare; } ; 160 enum memcg_kmem_state { KMEM_NONE = 0, KMEM_ALLOCATED = 1, KMEM_ONLINE = 2 } ; 166 struct mem_cgroup { struct cgroup_subsys_state css; struct mem_cgroup_id id; struct page_counter memory; struct page_counter swap; struct page_counter memsw; struct page_counter kmem; struct page_counter tcpmem; unsigned long low; unsigned long high; struct work_struct high_work; unsigned long soft_limit; struct vmpressure vmpressure; bool use_hierarchy; bool oom_lock; int under_oom; int swappiness; int oom_kill_disable; struct cgroup_file events_file; struct mutex thresholds_lock; struct mem_cgroup_thresholds thresholds; struct mem_cgroup_thresholds memsw_thresholds; struct list_head oom_notify; unsigned long move_charge_at_immigrate; atomic_t moving_account; spinlock_t move_lock; struct task_struct *move_lock_task; unsigned long move_lock_flags; struct mem_cgroup_stat_cpu *stat; unsigned long socket_pressure; bool tcpmem_active; int tcpmem_pressure; int kmemcg_id; enum memcg_kmem_state kmem_state; int last_scanned_node; nodemask_t scan_nodes; atomic_t numainfo_events; atomic_t numainfo_updating; struct list_head cgwb_list; struct wb_domain cgwb_domain; struct list_head event_list; spinlock_t event_list_lock; struct mem_cgroup_per_node *nodeinfo[0U]; } ; 27 struct gnet_stats_basic_packed { __u64 bytes; __u32 packets; } ; 41 struct gnet_stats_rate_est64 { __u64 bps; __u64 pps; } ; 51 struct gnet_stats_queue { __u32 qlen; __u32 backlog; __u32 drops; __u32 requeues; __u32 overlimits; } ; 519 struct tcmsg { unsigned char tcm_family; unsigned char tcm__pad1; unsigned short tcm__pad2; int tcm_ifindex; __u32 tcm_handle; __u32 tcm_parent; __u32 tcm_info; } ; 122 struct gnet_stats_basic_cpu { struct gnet_stats_basic_packed bstats; struct u64_stats_sync 
syncp; } ; 13 struct gnet_dump { spinlock_t *lock; struct sk_buff *skb; struct nlattr *tail; int compat_tc_stats; int compat_xstats; int padattr; void *xstats; int xstats_len; struct tc_stats tc_stats; } ; 87 struct nla_policy { u16 type; u16 len; } ; 25 struct rtnl_link_ops { struct list_head list; const char *kind; size_t priv_size; void (*setup)(struct net_device *); int maxtype; const struct nla_policy *policy; int (*validate)(struct nlattr **, struct nlattr **); int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **); int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **); void (*dellink)(struct net_device *, struct list_head *); size_t (*get_size)(const struct net_device *); int (*fill_info)(struct sk_buff *, const struct net_device *); size_t (*get_xstats_size)(const struct net_device *); int (*fill_xstats)(struct sk_buff *, const struct net_device *); unsigned int (*get_num_tx_queues)(); unsigned int (*get_num_rx_queues)(); int slave_maxtype; const struct nla_policy *slave_policy; int (*slave_validate)(struct nlattr **, struct nlattr **); int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **); size_t (*get_slave_size)(const struct net_device *, const struct net_device *); int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); struct net * (*get_link_net)(const struct net_device *); size_t (*get_linkxstats_size)(const struct net_device *, int); int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); } ; 158 struct Qdisc_ops ; 159 struct qdisc_walker ; 160 struct tcf_walker ; 30 struct qdisc_size_table { struct callback_head rcu; struct list_head list; struct tc_sizespec szopts; int refcnt; u16 data[]; } ; 38 struct Qdisc { int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); unsigned int flags; u32 limit; const struct Qdisc_ops *ops; 
struct qdisc_size_table *stab; struct list_head list; u32 handle; u32 parent; void *u32_node; struct netdev_queue *dev_queue; struct gnet_stats_rate_est64 rate_est; struct gnet_stats_basic_cpu *cpu_bstats; struct gnet_stats_queue *cpu_qstats; struct sk_buff *gso_skb; struct sk_buff_head q; struct gnet_stats_basic_packed bstats; seqcount_t running; struct gnet_stats_queue qstats; unsigned long state; struct Qdisc *next_sched; struct sk_buff *skb_bad_txq; struct callback_head callback_head; int padded; atomic_t refcnt; spinlock_t busylock; } ; 126 struct Qdisc_class_ops { struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); int (*graft)(struct Qdisc *, unsigned long, struct Qdisc *, struct Qdisc **); struct Qdisc * (*leaf)(struct Qdisc *, unsigned long); void (*qlen_notify)(struct Qdisc *, unsigned long); unsigned long int (*get)(struct Qdisc *, u32 ); void (*put)(struct Qdisc *, unsigned long); int (*change)(struct Qdisc *, u32 , u32 , struct nlattr **, unsigned long *); int (*delete)(struct Qdisc *, unsigned long); void (*walk)(struct Qdisc *, struct qdisc_walker *); struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); bool (*tcf_cl_offload)(u32 ); unsigned long int (*bind_tcf)(struct Qdisc *, unsigned long, u32 ); void (*unbind_tcf)(struct Qdisc *, unsigned long); int (*dump)(struct Qdisc *, unsigned long, struct sk_buff *, struct tcmsg *); int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); } ; 158 struct Qdisc_ops { struct Qdisc_ops *next; const struct Qdisc_class_ops *cl_ops; char id[16U]; int priv_size; int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); int (*init)(struct Qdisc *, struct nlattr *); void (*reset)(struct Qdisc *); void (*destroy)(struct Qdisc *); int (*change)(struct Qdisc *, struct nlattr *); void (*attach)(struct Qdisc *); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct 
Qdisc *, struct gnet_dump *); struct module *owner; } ; 183 struct tcf_result { unsigned long class; u32 classid; } ; 189 struct tcf_proto_ops { struct list_head head; char kind[16U]; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); int (*init)(struct tcf_proto *); bool (*destroy)(struct tcf_proto *, bool ); unsigned long int (*get)(struct tcf_proto *, u32 ); int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, unsigned long, u32 , struct nlattr **, unsigned long *, bool ); int (*delete)(struct tcf_proto *, unsigned long); void (*walk)(struct tcf_proto *, struct tcf_walker *); int (*dump)(struct net *, struct tcf_proto *, unsigned long, struct sk_buff *, struct tcmsg *); struct module *owner; } ; 214 struct tcf_proto { struct tcf_proto *next; void *root; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); __be16 protocol; u32 prio; u32 classid; struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; struct callback_head rcu; } ; 806 struct sock_filter { __u16 code; __u8 jt; __u8 jf; __u32 k; } ; 49 struct bpf_insn { __u8 code; unsigned char dst_reg; unsigned char src_reg; __s16 off; __s32 imm; } ; 88 enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC = 0, BPF_PROG_TYPE_SOCKET_FILTER = 1, BPF_PROG_TYPE_KPROBE = 2, BPF_PROG_TYPE_SCHED_CLS = 3, BPF_PROG_TYPE_SCHED_ACT = 4, BPF_PROG_TYPE_TRACEPOINT = 5, BPF_PROG_TYPE_XDP = 6 } ; 472 struct bpf_prog_aux ; 323 struct sock_fprog_kern { u16 len; struct sock_filter *filter; } ; 334 union __anonunion____missing_field_name_505 { struct sock_filter insns[0U]; struct bpf_insn insnsi[0U]; } ; 334 struct bpf_prog { u16 pages; unsigned char jited; unsigned char gpl_compatible; unsigned char cb_access; unsigned char dst_needed; u32 len; enum bpf_prog_type type; struct bpf_prog_aux *aux; struct sock_fprog_kern *orig_prog; unsigned int (*bpf_func)(const struct sk_buff *, const struct bpf_insn *); union __anonunion____missing_field_name_505 __annonCompField118; } 
; 355 struct sk_filter { atomic_t refcnt; struct callback_head rcu; struct bpf_prog *prog; } ; 138 struct pollfd { int fd; short events; short revents; } ; 32 struct poll_table_struct { void (*_qproc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); unsigned long _key; } ; 187 struct neigh_table ; 187 struct neigh_parms { possible_net_t net; struct net_device *dev; struct list_head list; int (*neigh_setup)(struct neighbour *); void (*neigh_cleanup)(struct neighbour *); struct neigh_table *tbl; void *sysctl_table; int dead; atomic_t refcnt; struct callback_head callback_head; int reachable_time; int data[13U]; unsigned long data_state[1U]; } ; 110 struct neigh_statistics { unsigned long allocs; unsigned long destroys; unsigned long hash_grows; unsigned long res_failed; unsigned long lookups; unsigned long hits; unsigned long rcv_probes_mcast; unsigned long rcv_probes_ucast; unsigned long periodic_gc_runs; unsigned long forced_gc_runs; unsigned long unres_discards; unsigned long table_fulls; } ; 130 struct neigh_ops ; 130 struct neighbour { struct neighbour *next; struct neigh_table *tbl; struct neigh_parms *parms; unsigned long confirmed; unsigned long updated; rwlock_t lock; atomic_t refcnt; struct sk_buff_head arp_queue; unsigned int arp_queue_len_bytes; struct timer_list timer; unsigned long used; atomic_t probes; __u8 flags; __u8 nud_state; __u8 type; __u8 dead; seqlock_t ha_lock; unsigned char ha[32U]; struct hh_cache hh; int (*output)(struct neighbour *, struct sk_buff *); const struct neigh_ops *ops; struct callback_head rcu; struct net_device *dev; u8 primary_key[0U]; } ; 159 struct neigh_ops { int family; void (*solicit)(struct neighbour *, struct sk_buff *); void (*error_report)(struct neighbour *, struct sk_buff *); int (*output)(struct neighbour *, struct sk_buff *); int (*connected_output)(struct neighbour *, struct sk_buff *); } ; 167 struct pneigh_entry { struct pneigh_entry *next; possible_net_t net; struct net_device *dev; u8 flags; 
u8 key[0U]; } ; 175 struct neigh_hash_table { struct neighbour **hash_buckets; unsigned int hash_shift; __u32 hash_rnd[4U]; struct callback_head rcu; } ; 188 struct neigh_table { int family; int entry_size; int key_len; __be16 protocol; __u32 (*hash)(const void *, const struct net_device *, __u32 *); bool (*key_eq)(const struct neighbour *, const void *); int (*constructor)(struct neighbour *); int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); void (*proxy_redo)(struct sk_buff *); char *id; struct neigh_parms parms; struct list_head parms_list; int gc_interval; int gc_thresh1; int gc_thresh2; int gc_thresh3; unsigned long last_flush; struct delayed_work gc_work; struct timer_list proxy_timer; struct sk_buff_head proxy_queue; atomic_t entries; rwlock_t lock; unsigned long last_rand; struct neigh_statistics *stats; struct neigh_hash_table *nht; struct pneigh_entry **phash_buckets; } ; 520 struct lwtunnel_state ; 520 struct dn_route ; 520 union __anonunion____missing_field_name_521 { struct dst_entry *next; struct rtable *rt_next; struct rt6_info *rt6_next; struct dn_route *dn_next; } ; 520 struct dst_entry { struct callback_head callback_head; struct dst_entry *child; struct net_device *dev; struct dst_ops *ops; unsigned long _metrics; unsigned long expires; struct dst_entry *path; struct dst_entry *from; struct xfrm_state *xfrm; int (*input)(struct sk_buff *); int (*output)(struct net *, struct sock *, struct sk_buff *); unsigned short flags; unsigned short pending_confirm; short error; short obsolete; unsigned short header_len; unsigned short trailer_len; __u32 tclassid; long __pad_to_align_refcnt[2U]; atomic_t __refcnt; int __use; unsigned long lastuse; struct lwtunnel_state *lwtstate; union __anonunion____missing_field_name_521 __annonCompField119; } ; 110 struct __anonstruct_socket_lock_t_522 { spinlock_t slock; int owned; wait_queue_head_t wq; struct lockdep_map dep_map; } ; 110 typedef struct __anonstruct_socket_lock_t_522 
socket_lock_t; 110 struct proto ; 116 typedef __u32 __portpair; 117 typedef __u64 __addrpair; 118 struct __anonstruct____missing_field_name_524 { __be32 skc_daddr; __be32 skc_rcv_saddr; } ; 118 union __anonunion____missing_field_name_523 { __addrpair skc_addrpair; struct __anonstruct____missing_field_name_524 __annonCompField120; } ; 118 union __anonunion____missing_field_name_525 { unsigned int skc_hash; __u16 skc_u16hashes[2U]; } ; 118 struct __anonstruct____missing_field_name_527 { __be16 skc_dport; __u16 skc_num; } ; 118 union __anonunion____missing_field_name_526 { __portpair skc_portpair; struct __anonstruct____missing_field_name_527 __annonCompField123; } ; 118 union __anonunion____missing_field_name_528 { struct hlist_node skc_bind_node; struct hlist_node skc_portaddr_node; } ; 118 struct inet_timewait_death_row ; 118 union __anonunion____missing_field_name_529 { unsigned long skc_flags; struct sock *skc_listener; struct inet_timewait_death_row *skc_tw_dr; } ; 118 union __anonunion____missing_field_name_530 { struct hlist_node skc_node; struct hlist_nulls_node skc_nulls_node; } ; 118 union __anonunion____missing_field_name_531 { int skc_incoming_cpu; u32 skc_rcv_wnd; u32 skc_tw_rcv_nxt; } ; 118 union __anonunion____missing_field_name_532 { u32 skc_rxhash; u32 skc_window_clamp; u32 skc_tw_snd_nxt; } ; 118 struct sock_common { union __anonunion____missing_field_name_523 __annonCompField121; union __anonunion____missing_field_name_525 __annonCompField122; union __anonunion____missing_field_name_526 __annonCompField124; unsigned short skc_family; volatile unsigned char skc_state; unsigned char skc_reuse; unsigned char skc_reuseport; unsigned char skc_ipv6only; unsigned char skc_net_refcnt; int skc_bound_dev_if; union __anonunion____missing_field_name_528 __annonCompField125; struct proto *skc_prot; possible_net_t skc_net; struct in6_addr skc_v6_daddr; struct in6_addr skc_v6_rcv_saddr; atomic64_t skc_cookie; union __anonunion____missing_field_name_529 
__annonCompField126; int skc_dontcopy_begin[0U]; union __anonunion____missing_field_name_530 __annonCompField127; int skc_tx_queue_mapping; union __anonunion____missing_field_name_531 __annonCompField128; atomic_t skc_refcnt; int skc_dontcopy_end[0U]; union __anonunion____missing_field_name_532 __annonCompField129; } ; 230 struct __anonstruct_sk_backlog_533 { atomic_t rmem_alloc; int len; struct sk_buff *head; struct sk_buff *tail; } ; 230 union __anonunion____missing_field_name_534 { struct socket_wq *sk_wq; struct socket_wq *sk_wq_raw; } ; 230 struct sock_reuseport ; 230 struct sock { struct sock_common __sk_common; socket_lock_t sk_lock; struct sk_buff_head sk_receive_queue; struct __anonstruct_sk_backlog_533 sk_backlog; int sk_forward_alloc; __u32 sk_txhash; unsigned int sk_napi_id; unsigned int sk_ll_usec; atomic_t sk_drops; int sk_rcvbuf; struct sk_filter *sk_filter; union __anonunion____missing_field_name_534 __annonCompField130; struct xfrm_policy *sk_policy[2U]; struct dst_entry *sk_rx_dst; struct dst_entry *sk_dst_cache; atomic_t sk_wmem_alloc; atomic_t sk_omem_alloc; int sk_sndbuf; struct sk_buff_head sk_write_queue; unsigned char sk_padding; unsigned char sk_no_check_tx; unsigned char sk_no_check_rx; unsigned char sk_userlocks; unsigned char sk_protocol; unsigned short sk_type; int sk_wmem_queued; gfp_t sk_allocation; u32 sk_pacing_rate; u32 sk_max_pacing_rate; netdev_features_t sk_route_caps; netdev_features_t sk_route_nocaps; int sk_gso_type; unsigned int sk_gso_max_size; u16 sk_gso_max_segs; int sk_rcvlowat; unsigned long sk_lingertime; struct sk_buff_head sk_error_queue; struct proto *sk_prot_creator; rwlock_t sk_callback_lock; int sk_err; int sk_err_soft; u32 sk_ack_backlog; u32 sk_max_ack_backlog; __u32 sk_priority; __u32 sk_mark; struct pid *sk_peer_pid; const struct cred *sk_peer_cred; long sk_rcvtimeo; long sk_sndtimeo; struct timer_list sk_timer; ktime_t sk_stamp; u16 sk_tsflags; u8 sk_shutdown; u32 sk_tskey; struct socket *sk_socket; void 
*sk_user_data; struct page_frag sk_frag; struct sk_buff *sk_send_head; __s32 sk_peek_off; int sk_write_pending; void *sk_security; struct sock_cgroup_data sk_cgrp_data; struct mem_cgroup *sk_memcg; void (*sk_state_change)(struct sock *); void (*sk_data_ready)(struct sock *); void (*sk_write_space)(struct sock *); void (*sk_error_report)(struct sock *); int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); void (*sk_destruct)(struct sock *); struct sock_reuseport *sk_reuseport_cb; struct callback_head sk_rcu; } ; 948 struct request_sock_ops ; 949 struct timewait_sock_ops ; 950 struct inet_hashinfo ; 951 struct raw_hashinfo ; 965 struct udp_table ; 965 union __anonunion_h_545 { struct inet_hashinfo *hashinfo; struct udp_table *udp_table; struct raw_hashinfo *raw_hash; } ; 965 struct proto { void (*close)(struct sock *, long); int (*connect)(struct sock *, struct sockaddr *, int); int (*disconnect)(struct sock *, int); struct sock * (*accept)(struct sock *, int, int *); int (*ioctl)(struct sock *, int, unsigned long); int (*init)(struct sock *); void (*destroy)(struct sock *); void (*shutdown)(struct sock *, int); int (*setsockopt)(struct sock *, int, int, char *, unsigned int); int (*getsockopt)(struct sock *, int, int, char *, int *); int (*compat_setsockopt)(struct sock *, int, int, char *, unsigned int); int (*compat_getsockopt)(struct sock *, int, int, char *, int *); int (*compat_ioctl)(struct sock *, unsigned int, unsigned long); int (*sendmsg)(struct sock *, struct msghdr *, size_t ); int (*recvmsg)(struct sock *, struct msghdr *, size_t , int, int, int *); int (*sendpage)(struct sock *, struct page *, int, size_t , int); int (*bind)(struct sock *, struct sockaddr *, int); int (*backlog_rcv)(struct sock *, struct sk_buff *); void (*release_cb)(struct sock *); int (*hash)(struct sock *); void (*unhash)(struct sock *); void (*rehash)(struct sock *); int (*get_port)(struct sock *, unsigned short); void (*clear_sk)(struct sock *, int); unsigned int inuse_idx; 
bool (*stream_memory_free)(const struct sock *); void (*enter_memory_pressure)(struct sock *); atomic_long_t *memory_allocated; struct percpu_counter *sockets_allocated; int *memory_pressure; long *sysctl_mem; int *sysctl_wmem; int *sysctl_rmem; int max_header; bool no_autobind; struct kmem_cache *slab; unsigned int obj_size; int slab_flags; struct percpu_counter *orphan_count; struct request_sock_ops *rsk_prot; struct timewait_sock_ops *twsk_prot; union __anonunion_h_545 h; struct module *owner; char name[32U]; struct list_head node; int (*diag_destroy)(struct sock *, int); } ; 174 struct request_sock_ops { int family; int obj_size; struct kmem_cache *slab; char *slab_name; int (*rtx_syn_ack)(const struct sock *, struct request_sock *); void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); void (*send_reset)(const struct sock *, struct sk_buff *); void (*destructor)(struct request_sock *); void (*syn_ack_timeout)(const struct request_sock *); } ; 46 struct request_sock { struct sock_common __req_common; struct request_sock *dl_next; u16 mss; u8 num_retrans; unsigned char cookie_ts; unsigned char num_timeout; u32 ts_recent; struct timer_list rsk_timer; const struct request_sock_ops *rsk_ops; struct sock *sk; u32 *saved_syn; u32 secid; u32 peer_secid; } ; 18 struct fib_rule_hdr { __u8 family; __u8 dst_len; __u8 src_len; __u8 tos; __u8 table; __u8 res1; __u8 res2; __u8 action; __u32 flags; } ; 68 struct fib_rule { struct list_head list; int iifindex; int oifindex; u32 mark; u32 mark_mask; u32 flags; u32 table; u8 action; u8 l3mdev; u32 target; __be64 tun_id; struct fib_rule *ctarget; struct net *fr_net; atomic_t refcnt; u32 pref; int suppress_ifgroup; int suppress_prefixlen; char iifname[16U]; char oifname[16U]; struct callback_head rcu; } ; 35 struct fib_lookup_arg { void *lookup_ptr; void *result; struct fib_rule *rule; u32 table; int flags; } ; 43 struct fib_rules_ops { int family; struct list_head list; int rule_size; int addr_size; int 
unresolved_rules; int nr_goto_rules; int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); bool (*suppress)(struct fib_rule *, struct fib_lookup_arg *); int (*match)(struct fib_rule *, struct flowi *, int); int (*configure)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *, struct nlattr **); int (*delete)(struct fib_rule *); int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **); int (*fill)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *); size_t (*nlmsg_payload)(struct fib_rule *); void (*flush_cache)(struct fib_rules_ops *); int nlgroup; const struct nla_policy *policy; struct list_head rules_list; struct module *owner; struct net *fro_net; struct callback_head rcu; } ; 140 struct l3mdev_ops { u32 (*l3mdev_fib_table)(const struct net_device *); struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *, struct sk_buff *, u16 ); struct rtable * (*l3mdev_get_rtable)(const struct net_device *, const struct flowi4 *); int (*l3mdev_get_saddr)(struct net_device *, struct flowi4 *); struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *, struct flowi6 *); int (*l3mdev_get_saddr6)(struct net_device *, const struct sock *, struct flowi6 *); } ; 328 struct timewait_sock_ops { struct kmem_cache *twsk_slab; char *twsk_slab_name; unsigned int twsk_obj_size; int (*twsk_unique)(struct sock *, struct sock *, void *); void (*twsk_destructor)(struct sock *); } ; 39 struct inet_timewait_death_row { atomic_t tw_count; struct inet_hashinfo *hashinfo; int sysctl_tw_recycle; int sysctl_max_tw_buckets; } ; 100 struct ip6_sf_list { struct ip6_sf_list *sf_next; struct in6_addr sf_addr; unsigned long sf_count[2U]; unsigned char sf_gsresp; unsigned char sf_oldin; unsigned char sf_crcount; } ; 109 struct ifmcaddr6 { struct in6_addr mca_addr; struct inet6_dev *idev; struct ifmcaddr6 *next; struct ip6_sf_list *mca_sources; struct ip6_sf_list *mca_tomb; unsigned int mca_sfmode; unsigned char mca_crcount; unsigned long 
mca_sfcount[2U]; struct timer_list mca_timer; unsigned int mca_flags; int mca_users; atomic_t mca_refcnt; spinlock_t mca_lock; unsigned long mca_cstamp; unsigned long mca_tstamp; } ; 141 struct ifacaddr6 { struct in6_addr aca_addr; struct inet6_dev *aca_idev; struct rt6_info *aca_rt; struct ifacaddr6 *aca_next; int aca_users; atomic_t aca_refcnt; unsigned long aca_cstamp; unsigned long aca_tstamp; } ; 152 struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry; struct ipstats_mib *ipv6; struct icmpv6_mib_device *icmpv6dev; struct icmpv6msg_mib_device *icmpv6msgdev; } ; 163 struct inet6_dev { struct net_device *dev; struct list_head addr_list; struct ifmcaddr6 *mc_list; struct ifmcaddr6 *mc_tomb; spinlock_t mc_lock; unsigned char mc_qrv; unsigned char mc_gq_running; unsigned char mc_ifc_count; unsigned char mc_dad_count; unsigned long mc_v1_seen; unsigned long mc_qi; unsigned long mc_qri; unsigned long mc_maxdelay; struct timer_list mc_gq_timer; struct timer_list mc_ifc_timer; struct timer_list mc_dad_timer; struct ifacaddr6 *ac_list; rwlock_t lock; atomic_t refcnt; __u32 if_flags; int dead; u8 rndid[8U]; struct timer_list regen_timer; struct list_head tempaddr_list; struct in6_addr token; struct neigh_parms *nd_parms; struct ipv6_devconf cnf; struct ipv6_devstat stats; struct timer_list rs_timer; __u8 rs_probes; __u8 addr_gen_mode; unsigned long tstamp; struct callback_head rcu; } ; 47 struct prefix_info ; 98 struct nd_opt_hdr { __u8 nd_opt_type; __u8 nd_opt_len; } ; 103 struct ndisc_options { struct nd_opt_hdr *nd_opt_array[6U]; struct nd_opt_hdr *nd_opts_ri; struct nd_opt_hdr *nd_opts_ri_end; struct nd_opt_hdr *nd_useropts; struct nd_opt_hdr *nd_useropts_end; struct nd_opt_hdr *nd_802154_opt_array[3U]; } ; 134 struct ndisc_ops { int (*is_useropt)(u8 ); int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *); void (*update)(const struct net_device *, struct neighbour *, u32 , u8 , const struct ndisc_options *); int 
(*opt_addr_space)(const struct net_device *, u8 , struct neighbour *, u8 *, u8 **); void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8 , const u8 *); void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32 , bool , bool , __u32 , u32 , bool ); } ; 37 struct ipv4_addr_key { __be32 addr; int vif; } ; 23 union __anonunion____missing_field_name_583 { struct ipv4_addr_key a4; struct in6_addr a6; u32 key[4U]; } ; 23 struct inetpeer_addr { union __anonunion____missing_field_name_583 __annonCompField133; __u16 family; } ; 34 union __anonunion____missing_field_name_584 { struct list_head gc_list; struct callback_head gc_rcu; } ; 34 struct __anonstruct____missing_field_name_586 { atomic_t rid; } ; 34 union __anonunion____missing_field_name_585 { struct __anonstruct____missing_field_name_586 __annonCompField135; struct callback_head rcu; struct inet_peer *gc_next; } ; 34 struct inet_peer { struct inet_peer *avl_left; struct inet_peer *avl_right; struct inetpeer_addr daddr; __u32 avl_height; u32 metrics[16U]; u32 rate_tokens; unsigned long rate_last; union __anonunion____missing_field_name_584 __annonCompField134; union __anonunion____missing_field_name_585 __annonCompField136; __u32 dtime; atomic_t refcnt; } ; 65 struct inet_peer_base { struct inet_peer *root; seqlock_t lock; int total; } ; 174 struct fib_table { struct hlist_node tb_hlist; u32 tb_id; int tb_num_default; struct callback_head rcu; unsigned long *tb_data; unsigned long __data[0U]; } ; 48 struct uncached_list ; 49 struct rtable { struct dst_entry dst; int rt_genid; unsigned int rt_flags; __u16 rt_type; __u8 rt_is_input; __u8 rt_uses_gateway; int rt_iif; __be32 rt_gateway; u32 rt_pmtu; u32 rt_table_id; struct list_head rt_uncached; struct uncached_list *rt_uncached_list; } ; 213 struct in_ifaddr ; 583 struct mmu_notifier ; 584 struct mmu_notifier_ops ; 585 struct mmu_notifier_mm { struct hlist_head list; 
spinlock_t lock; } ; 26 struct mmu_notifier_ops { void (*release)(struct mmu_notifier *, struct mm_struct *); int (*clear_flush_young)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); int (*clear_young)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); int (*test_young)(struct mmu_notifier *, struct mm_struct *, unsigned long); void (*change_pte)(struct mmu_notifier *, struct mm_struct *, unsigned long, pte_t ); void (*invalidate_page)(struct mmu_notifier *, struct mm_struct *, unsigned long); void (*invalidate_range_start)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); void (*invalidate_range_end)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); void (*invalidate_range)(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); } ; 180 struct mmu_notifier { struct hlist_node hlist; const struct mmu_notifier_ops *ops; } ; 66 struct __anonstruct_global_594 { __be64 subnet_prefix; __be64 interface_id; } ; 66 union ib_gid { u8 raw[16U]; struct __anonstruct_global_594 global; } ; 76 enum ib_gid_type { IB_GID_TYPE_IB = 0, IB_GID_TYPE_ROCE = 0, IB_GID_TYPE_ROCE_UDP_ENCAP = 1, IB_GID_TYPE_SIZE = 2 } ; 83 struct ib_gid_attr { enum ib_gid_type gid_type; struct net_device *ndev; } ; 103 enum rdma_transport_type { RDMA_TRANSPORT_IB = 0, RDMA_TRANSPORT_IWARP = 1, RDMA_TRANSPORT_USNIC = 2, RDMA_TRANSPORT_USNIC_UDP = 3 } ; 122 enum rdma_network_type { RDMA_NETWORK_IB = 0, RDMA_NETWORK_ROCE_V1 = 0, RDMA_NETWORK_IPV4 = 1, RDMA_NETWORK_IPV6 = 2 } ; 151 enum rdma_link_layer { IB_LINK_LAYER_UNSPECIFIED = 0, IB_LINK_LAYER_INFINIBAND = 1, IB_LINK_LAYER_ETHERNET = 2 } ; 205 enum ib_atomic_cap { IB_ATOMIC_NONE = 0, IB_ATOMIC_HCA = 1, IB_ATOMIC_GLOB = 2 } ; 223 struct __anonstruct_per_transport_caps_595 { uint32_t rc_odp_caps; uint32_t uc_odp_caps; uint32_t ud_odp_caps; } ; 223 struct ib_odp_caps { uint64_t general_caps; struct __anonstruct_per_transport_caps_595 
per_transport_caps; } ; 268 struct ib_cq_init_attr { unsigned int cqe; int comp_vector; u32 flags; } ; 274 struct ib_device_attr { u64 fw_ver; __be64 sys_image_guid; u64 max_mr_size; u64 page_size_cap; u32 vendor_id; u32 vendor_part_id; u32 hw_ver; int max_qp; int max_qp_wr; u64 device_cap_flags; int max_sge; int max_sge_rd; int max_cq; int max_cqe; int max_mr; int max_pd; int max_qp_rd_atom; int max_ee_rd_atom; int max_res_rd_atom; int max_qp_init_rd_atom; int max_ee_init_rd_atom; enum ib_atomic_cap atomic_cap; enum ib_atomic_cap masked_atomic_cap; int max_ee; int max_rdd; int max_mw; int max_raw_ipv6_qp; int max_raw_ethy_qp; int max_mcast_grp; int max_mcast_qp_attach; int max_total_mcast_qp_attach; int max_ah; int max_fmr; int max_map_per_fmr; int max_srq; int max_srq_wr; int max_srq_sge; unsigned int max_fast_reg_page_list_len; u16 max_pkeys; u8 local_ca_ack_delay; int sig_prot_cap; int sig_guard_cap; struct ib_odp_caps odp_caps; uint64_t timestamp_mask; uint64_t hca_core_clock; } ; 322 enum ib_mtu { IB_MTU_256 = 1, IB_MTU_512 = 2, IB_MTU_1024 = 3, IB_MTU_2048 = 4, IB_MTU_4096 = 5 } ; 342 enum ib_port_state { IB_PORT_NOP = 0, IB_PORT_DOWN = 1, IB_PORT_INIT = 2, IB_PORT_ARMED = 3, IB_PORT_ACTIVE = 4, IB_PORT_ACTIVE_DEFER = 5 } ; 405 struct rdma_hw_stats { unsigned long timestamp; unsigned long lifespan; const const char **names; int num_counters; u64 value[]; } ; 454 struct ib_port_attr { u64 subnet_prefix; enum ib_port_state state; enum ib_mtu max_mtu; enum ib_mtu active_mtu; int gid_tbl_len; u32 port_cap_flags; u32 max_msg_sz; u32 bad_pkey_cntr; u32 qkey_viol_cntr; u16 pkey_tbl_len; u16 lid; u16 sm_lid; u8 lmc; u8 max_vl_num; u8 sm_sl; u8 subnet_timeout; u8 init_type_reply; u8 active_width; u8 active_speed; u8 phys_state; bool grh_required; } ; 527 struct ib_device_modify { u64 sys_image_guid; char node_desc[64U]; } ; 538 struct ib_port_modify { u32 set_port_cap_mask; u32 clr_port_cap_mask; u8 init_type; } ; 544 enum ib_event_type { IB_EVENT_CQ_ERR = 0, 
IB_EVENT_QP_FATAL = 1, IB_EVENT_QP_REQ_ERR = 2, IB_EVENT_QP_ACCESS_ERR = 3, IB_EVENT_COMM_EST = 4, IB_EVENT_SQ_DRAINED = 5, IB_EVENT_PATH_MIG = 6, IB_EVENT_PATH_MIG_ERR = 7, IB_EVENT_DEVICE_FATAL = 8, IB_EVENT_PORT_ACTIVE = 9, IB_EVENT_PORT_ERR = 10, IB_EVENT_LID_CHANGE = 11, IB_EVENT_PKEY_CHANGE = 12, IB_EVENT_SM_CHANGE = 13, IB_EVENT_SRQ_ERR = 14, IB_EVENT_SRQ_LIMIT_REACHED = 15, IB_EVENT_QP_LAST_WQE_REACHED = 16, IB_EVENT_CLIENT_REREGISTER = 17, IB_EVENT_GID_CHANGE = 18, IB_EVENT_WQ_FATAL = 19 } ; 569 struct ib_device ; 569 struct ib_cq ; 569 struct ib_qp ; 569 struct ib_srq ; 569 struct ib_wq ; 569 union __anonunion_element_596 { struct ib_cq *cq; struct ib_qp *qp; struct ib_srq *srq; struct ib_wq *wq; u8 port_num; } ; 569 struct ib_event { struct ib_device *device; union __anonunion_element_596 element; enum ib_event_type event; } ; 581 struct ib_event_handler { struct ib_device *device; void (*handler)(struct ib_event_handler *, struct ib_event *); struct list_head list; } ; 587 struct ib_global_route { union ib_gid dgid; u32 flow_label; u8 sgid_index; u8 hop_limit; u8 traffic_class; } ; 602 struct ib_grh { __be32 version_tclass_flow; __be16 paylen; u8 next_hdr; u8 hop_limit; union ib_gid sgid; union ib_gid dgid; } ; 669 enum ib_mr_type { IB_MR_TYPE_MEM_REG = 0, IB_MR_TYPE_SIGNATURE = 1, IB_MR_TYPE_SG_GAPS = 2 } ; 675 enum ib_signature_type { IB_SIG_TYPE_NONE = 0, IB_SIG_TYPE_T10_DIF = 1 } ; 680 enum ib_t10_dif_bg_type { IB_T10DIF_CRC = 0, IB_T10DIF_CSUM = 1 } ; 685 struct ib_t10_dif_domain { enum ib_t10_dif_bg_type bg_type; u16 pi_interval; u16 bg; u16 app_tag; u32 ref_tag; bool ref_remap; bool app_escape; bool ref_escape; u16 apptag_check_mask; } ; 733 union __anonunion_sig_598 { struct ib_t10_dif_domain dif; } ; 733 struct ib_sig_domain { enum ib_signature_type sig_type; union __anonunion_sig_598 sig; } ; 746 struct ib_sig_attrs { u8 check_mask; struct ib_sig_domain mem; struct ib_sig_domain wire; } ; 758 enum ib_sig_err_type { IB_SIG_BAD_GUARD = 0, 
IB_SIG_BAD_REFTAG = 1, IB_SIG_BAD_APPTAG = 2 } ; 764 struct ib_sig_err { enum ib_sig_err_type err_type; u32 expected; u32 actual; u64 sig_err_offset; u32 key; } ; 779 struct ib_mr_status { u32 fail_status; struct ib_sig_err sig_err; } ; 799 struct ib_ah_attr { struct ib_global_route grh; u16 dlid; u8 sl; u8 src_path_bits; u8 static_rate; u8 ah_flags; u8 port_num; u8 dmac[6U]; } ; 810 enum ib_wc_status { IB_WC_SUCCESS = 0, IB_WC_LOC_LEN_ERR = 1, IB_WC_LOC_QP_OP_ERR = 2, IB_WC_LOC_EEC_OP_ERR = 3, IB_WC_LOC_PROT_ERR = 4, IB_WC_WR_FLUSH_ERR = 5, IB_WC_MW_BIND_ERR = 6, IB_WC_BAD_RESP_ERR = 7, IB_WC_LOC_ACCESS_ERR = 8, IB_WC_REM_INV_REQ_ERR = 9, IB_WC_REM_ACCESS_ERR = 10, IB_WC_REM_OP_ERR = 11, IB_WC_RETRY_EXC_ERR = 12, IB_WC_RNR_RETRY_EXC_ERR = 13, IB_WC_LOC_RDD_VIOL_ERR = 14, IB_WC_REM_INV_RD_REQ_ERR = 15, IB_WC_REM_ABORT_ERR = 16, IB_WC_INV_EECN_ERR = 17, IB_WC_INV_EEC_STATE_ERR = 18, IB_WC_FATAL_ERR = 19, IB_WC_RESP_TIMEOUT_ERR = 20, IB_WC_GENERAL_ERR = 21 } ; 837 enum ib_wc_opcode { IB_WC_SEND = 0, IB_WC_RDMA_WRITE = 1, IB_WC_RDMA_READ = 2, IB_WC_COMP_SWAP = 3, IB_WC_FETCH_ADD = 4, IB_WC_LSO = 5, IB_WC_LOCAL_INV = 6, IB_WC_REG_MR = 7, IB_WC_MASKED_COMP_SWAP = 8, IB_WC_MASKED_FETCH_ADD = 9, IB_WC_RECV = 128, IB_WC_RECV_RDMA_WITH_IMM = 129 } ; 862 struct ib_cqe ; 862 union __anonunion____missing_field_name_599 { u64 wr_id; struct ib_cqe *wr_cqe; } ; 862 union __anonunion_ex_600 { __be32 imm_data; u32 invalidate_rkey; } ; 862 struct ib_wc { union __anonunion____missing_field_name_599 __annonCompField139; enum ib_wc_status status; enum ib_wc_opcode opcode; u32 vendor_err; u32 byte_len; struct ib_qp *qp; union __anonunion_ex_600 ex; u32 src_qp; int wc_flags; u16 pkey_index; u16 slid; u8 sl; u8 dlid_path_bits; u8 port_num; u8 smac[6U]; u16 vlan_id; u8 network_hdr_type; } ; 892 enum ib_cq_notify_flags { IB_CQ_SOLICITED = 1, IB_CQ_NEXT_COMP = 2, IB_CQ_SOLICITED_MASK = 3, IB_CQ_REPORT_MISSED_EVENTS = 4 } ; 899 enum ib_srq_type { IB_SRQT_BASIC = 0, IB_SRQT_XRC = 1 } ; 904 
enum ib_srq_attr_mask { IB_SRQ_MAX_WR = 1, IB_SRQ_LIMIT = 2 } ; 909 struct ib_srq_attr { u32 max_wr; u32 max_sge; u32 srq_limit; } ; 915 struct ib_xrcd ; 915 struct __anonstruct_xrc_602 { struct ib_xrcd *xrcd; struct ib_cq *cq; } ; 915 union __anonunion_ext_601 { struct __anonstruct_xrc_602 xrc; } ; 915 struct ib_srq_init_attr { void (*event_handler)(struct ib_event *, void *); void *srq_context; struct ib_srq_attr attr; enum ib_srq_type srq_type; union __anonunion_ext_601 ext; } ; 929 struct ib_qp_cap { u32 max_send_wr; u32 max_recv_wr; u32 max_send_sge; u32 max_recv_sge; u32 max_inline_data; u32 max_rdma_ctxs; } ; 944 enum ib_sig_type { IB_SIGNAL_ALL_WR = 0, IB_SIGNAL_REQ_WR = 1 } ; 949 enum ib_qp_type { IB_QPT_SMI = 0, IB_QPT_GSI = 1, IB_QPT_RC = 2, IB_QPT_UC = 3, IB_QPT_UD = 4, IB_QPT_RAW_IPV6 = 5, IB_QPT_RAW_ETHERTYPE = 6, IB_QPT_RAW_PACKET = 8, IB_QPT_XRC_INI = 9, IB_QPT_XRC_TGT = 10, IB_QPT_MAX = 11, IB_QPT_RESERVED1 = 4096, IB_QPT_RESERVED2 = 4097, IB_QPT_RESERVED3 = 4098, IB_QPT_RESERVED4 = 4099, IB_QPT_RESERVED5 = 4100, IB_QPT_RESERVED6 = 4101, IB_QPT_RESERVED7 = 4102, IB_QPT_RESERVED8 = 4103, IB_QPT_RESERVED9 = 4104, IB_QPT_RESERVED10 = 4105 } ; 973 enum ib_qp_create_flags { IB_QP_CREATE_IPOIB_UD_LSO = 1, IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 2, IB_QP_CREATE_CROSS_CHANNEL = 4, IB_QP_CREATE_MANAGED_SEND = 8, IB_QP_CREATE_MANAGED_RECV = 16, IB_QP_CREATE_NETIF_QP = 32, IB_QP_CREATE_SIGNATURE_EN = 64, IB_QP_CREATE_USE_GFP_NOIO = 128, IB_QP_CREATE_SCATTER_FCS = 256, IB_QP_CREATE_RESERVED_START = 67108864, IB_QP_CREATE_RESERVED_END = -2147483648 } ; 987 struct ib_rwq_ind_table ; 987 struct ib_qp_init_attr { void (*event_handler)(struct ib_event *, void *); void *qp_context; struct ib_cq *send_cq; struct ib_cq *recv_cq; struct ib_srq *srq; struct ib_xrcd *xrcd; struct ib_qp_cap cap; enum ib_sig_type sq_sig_type; enum ib_qp_type qp_type; enum ib_qp_create_flags create_flags; u8 port_num; struct ib_rwq_ind_table *rwq_ind_tbl; } ; 1092 enum ib_qp_state { 
IB_QPS_RESET = 0, IB_QPS_INIT = 1, IB_QPS_RTR = 2, IB_QPS_RTS = 3, IB_QPS_SQD = 4, IB_QPS_SQE = 5, IB_QPS_ERR = 6 } ; 1102 enum ib_mig_state { IB_MIG_MIGRATED = 0, IB_MIG_REARM = 1, IB_MIG_ARMED = 2 } ; 1108 enum ib_mw_type { IB_MW_TYPE_1 = 1, IB_MW_TYPE_2 = 2 } ; 1113 struct ib_qp_attr { enum ib_qp_state qp_state; enum ib_qp_state cur_qp_state; enum ib_mtu path_mtu; enum ib_mig_state path_mig_state; u32 qkey; u32 rq_psn; u32 sq_psn; u32 dest_qp_num; int qp_access_flags; struct ib_qp_cap cap; struct ib_ah_attr ah_attr; struct ib_ah_attr alt_ah_attr; u16 pkey_index; u16 alt_pkey_index; u8 en_sqd_async_notify; u8 sq_draining; u8 max_rd_atomic; u8 max_dest_rd_atomic; u8 min_rnr_timer; u8 port_num; u8 timeout; u8 retry_cnt; u8 rnr_retry; u8 alt_port_num; u8 alt_timeout; } ; 1141 enum ib_wr_opcode { IB_WR_RDMA_WRITE = 0, IB_WR_RDMA_WRITE_WITH_IMM = 1, IB_WR_SEND = 2, IB_WR_SEND_WITH_IMM = 3, IB_WR_RDMA_READ = 4, IB_WR_ATOMIC_CMP_AND_SWP = 5, IB_WR_ATOMIC_FETCH_AND_ADD = 6, IB_WR_LSO = 7, IB_WR_SEND_WITH_INV = 8, IB_WR_RDMA_READ_WITH_INV = 9, IB_WR_LOCAL_INV = 10, IB_WR_REG_MR = 11, IB_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, IB_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, IB_WR_REG_SIG_MR = 14, IB_WR_RESERVED1 = 240, IB_WR_RESERVED2 = 241, IB_WR_RESERVED3 = 242, IB_WR_RESERVED4 = 243, IB_WR_RESERVED5 = 244, IB_WR_RESERVED6 = 245, IB_WR_RESERVED7 = 246, IB_WR_RESERVED8 = 247, IB_WR_RESERVED9 = 248, IB_WR_RESERVED10 = 249 } ; 1179 struct ib_sge { u64 addr; u32 length; u32 lkey; } ; 1190 struct ib_cqe { void (*done)(struct ib_cq *, struct ib_wc *); } ; 1194 union __anonunion____missing_field_name_603 { u64 wr_id; struct ib_cqe *wr_cqe; } ; 1194 union __anonunion_ex_604 { __be32 imm_data; u32 invalidate_rkey; } ; 1194 struct ib_send_wr { struct ib_send_wr *next; union __anonunion____missing_field_name_603 __annonCompField140; struct ib_sge *sg_list; int num_sge; enum ib_wr_opcode opcode; int send_flags; union __anonunion_ex_604 ex; } ; 1210 struct ib_rdma_wr { struct ib_send_wr wr; u64 
remote_addr; u32 rkey; } ; 1237 struct ib_ah ; 1254 struct ib_mr ; 1254 struct ib_reg_wr { struct ib_send_wr wr; struct ib_mr *mr; u32 key; int access; } ; 1266 struct ib_sig_handover_wr { struct ib_send_wr wr; struct ib_sig_attrs *sig_attrs; struct ib_mr *sig_mr; int access_flags; struct ib_sge *prot; } ; 1279 union __anonunion____missing_field_name_605 { u64 wr_id; struct ib_cqe *wr_cqe; } ; 1279 struct ib_recv_wr { struct ib_recv_wr *next; union __anonunion____missing_field_name_605 __annonCompField141; struct ib_sge *sg_list; int num_sge; } ; 1305 struct ib_fmr_attr { int max_pages; int max_maps; u8 page_shift; } ; 1315 struct ib_umem ; 1316 struct ib_ucontext { struct ib_device *device; struct list_head pd_list; struct list_head mr_list; struct list_head mw_list; struct list_head cq_list; struct list_head qp_list; struct list_head srq_list; struct list_head ah_list; struct list_head xrcd_list; struct list_head rule_list; struct list_head wq_list; struct list_head rwq_ind_tbl_list; int closing; struct pid *tgid; struct rb_root umem_tree; struct rw_semaphore umem_rwsem; void (*invalidate_range)(struct ib_umem *, unsigned long, unsigned long); struct mmu_notifier mn; atomic_t notifier_count; struct list_head no_private_counters; int odp_mrs_count; } ; 1350 struct ib_uobject { u64 user_handle; struct ib_ucontext *context; void *object; struct list_head list; int id; struct kref ref; struct rw_semaphore mutex; struct callback_head rcu; int live; } ; 1363 struct ib_udata { const void *inbuf; void *outbuf; size_t inlen; size_t outlen; } ; 1370 struct ib_pd { u32 local_dma_lkey; struct ib_device *device; struct ib_uobject *uobject; atomic_t usecnt; struct ib_mr *local_mr; } ; 1378 struct ib_xrcd { struct ib_device *device; atomic_t usecnt; struct inode *inode; struct mutex tgt_qp_mutex; struct list_head tgt_qp_list; } ; 1387 struct ib_ah { struct ib_device *device; struct ib_pd *pd; struct ib_uobject *uobject; } ; 1395 enum ib_poll_context { IB_POLL_DIRECT = 0, 
IB_POLL_SOFTIRQ = 1, IB_POLL_WORKQUEUE = 2 } ; 1401 union __anonunion____missing_field_name_606 { struct irq_poll iop; struct work_struct work; } ; 1401 struct ib_cq { struct ib_device *device; struct ib_uobject *uobject; void (*comp_handler)(struct ib_cq *, void *); void (*event_handler)(struct ib_event *, void *); void *cq_context; int cqe; atomic_t usecnt; enum ib_poll_context poll_ctx; struct ib_wc *wc; union __anonunion____missing_field_name_606 __annonCompField142; } ; 1417 struct __anonstruct_xrc_608 { struct ib_xrcd *xrcd; struct ib_cq *cq; u32 srq_num; } ; 1417 union __anonunion_ext_607 { struct __anonstruct_xrc_608 xrc; } ; 1417 struct ib_srq { struct ib_device *device; struct ib_pd *pd; struct ib_uobject *uobject; void (*event_handler)(struct ib_event *, void *); void *srq_context; enum ib_srq_type srq_type; atomic_t usecnt; union __anonunion_ext_607 ext; } ; 1435 enum ib_wq_type { IB_WQT_RQ = 0 } ; 1439 enum ib_wq_state { IB_WQS_RESET = 0, IB_WQS_RDY = 1, IB_WQS_ERR = 2 } ; 1445 struct ib_wq { struct ib_device *device; struct ib_uobject *uobject; void *wq_context; void (*event_handler)(struct ib_event *, void *); struct ib_pd *pd; struct ib_cq *cq; u32 wq_num; enum ib_wq_state state; enum ib_wq_type wq_type; atomic_t usecnt; } ; 1458 struct ib_wq_init_attr { void *wq_context; enum ib_wq_type wq_type; u32 max_wr; u32 max_sge; struct ib_cq *cq; void (*event_handler)(struct ib_event *, void *); } ; 1472 struct ib_wq_attr { enum ib_wq_state wq_state; enum ib_wq_state curr_wq_state; } ; 1477 struct ib_rwq_ind_table { struct ib_device *device; struct ib_uobject *uobject; atomic_t usecnt; u32 ind_tbl_num; u32 log_ind_tbl_size; struct ib_wq **ind_tbl; } ; 1486 struct ib_rwq_ind_table_init_attr { u32 log_ind_tbl_size; struct ib_wq **ind_tbl; } ; 1492 struct ib_qp { struct ib_device *device; struct ib_pd *pd; struct ib_cq *send_cq; struct ib_cq *recv_cq; spinlock_t mr_lock; int mrs_used; struct list_head rdma_mrs; struct list_head sig_mrs; struct ib_srq *srq; 
struct ib_xrcd *xrcd; struct list_head xrcd_list; atomic_t usecnt; struct list_head open_list; struct ib_qp *real_qp; struct ib_uobject *uobject; void (*event_handler)(struct ib_event *, void *); void *qp_context; u32 qp_num; u32 max_write_sge; u32 max_read_sge; enum ib_qp_type qp_type; struct ib_rwq_ind_table *rwq_ind_tbl; } ; 1523 union __anonunion____missing_field_name_609 { struct ib_uobject *uobject; struct list_head qp_entry; } ; 1523 struct ib_mr { struct ib_device *device; struct ib_pd *pd; u32 lkey; u32 rkey; u64 iova; u32 length; unsigned int page_size; bool need_inval; union __anonunion____missing_field_name_609 __annonCompField143; } ; 1538 struct ib_mw { struct ib_device *device; struct ib_pd *pd; struct ib_uobject *uobject; u32 rkey; enum ib_mw_type type; } ; 1546 struct ib_fmr { struct ib_device *device; struct ib_pd *pd; struct list_head list; u32 lkey; u32 rkey; } ; 1554 enum ib_flow_attr_type { IB_FLOW_ATTR_NORMAL = 0, IB_FLOW_ATTR_ALL_DEFAULT = 1, IB_FLOW_ATTR_MC_DEFAULT = 2, IB_FLOW_ATTR_SNIFFER = 3 } ; 1675 struct ib_flow_attr { enum ib_flow_attr_type type; u16 size; u16 priority; u32 flags; u8 num_of_specs; u8 port; } ; 1684 struct ib_flow { struct ib_qp *qp; struct ib_uobject *uobject; } ; 1693 struct ib_mad_hdr ; 1707 struct ib_pkey_cache ; 1707 struct ib_gid_table ; 1707 struct ib_cache { rwlock_t lock; struct ib_event_handler event_handler; struct ib_pkey_cache **pkey_cache; struct ib_gid_table **gid_cache; u8 *lmc_cache; } ; 1719 struct ib_dma_mapping_ops { int (*mapping_error)(struct ib_device *, u64 ); u64 (*map_single)(struct ib_device *, void *, size_t , enum dma_data_direction ); void (*unmap_single)(struct ib_device *, u64 , size_t , enum dma_data_direction ); u64 (*map_page)(struct ib_device *, struct page *, unsigned long, size_t , enum dma_data_direction ); void (*unmap_page)(struct ib_device *, u64 , size_t , enum dma_data_direction ); int (*map_sg)(struct ib_device *, struct scatterlist *, int, enum dma_data_direction ); void 
(*unmap_sg)(struct ib_device *, struct scatterlist *, int, enum dma_data_direction ); void (*sync_single_for_cpu)(struct ib_device *, u64 , size_t , enum dma_data_direction ); void (*sync_single_for_device)(struct ib_device *, u64 , size_t , enum dma_data_direction ); void * (*alloc_coherent)(struct ib_device *, size_t , u64 *, gfp_t ); void (*free_coherent)(struct ib_device *, size_t , void *, u64 ); } ; 1756 struct iw_cm_verbs ; 1757 struct ib_port_immutable { int pkey_tbl_len; int gid_tbl_len; u32 core_cap_flags; u32 max_mad_size; } ; 1767 enum ldv_40106 { IB_DEV_UNINITIALIZED = 0, IB_DEV_REGISTERED = 1, IB_DEV_UNREGISTERED = 2 } ; 1773 struct ib_device { struct device *dma_device; char name[64U]; struct list_head event_handler_list; spinlock_t event_handler_lock; spinlock_t client_data_lock; struct list_head core_list; struct list_head client_data_list; struct ib_cache cache; struct ib_port_immutable *port_immutable; int num_comp_vectors; struct iw_cm_verbs *iwcm; struct rdma_hw_stats * (*alloc_hw_stats)(struct ib_device *, u8 ); int (*get_hw_stats)(struct ib_device *, struct rdma_hw_stats *, u8 , int); int (*query_device)(struct ib_device *, struct ib_device_attr *, struct ib_udata *); int (*query_port)(struct ib_device *, u8 , struct ib_port_attr *); enum rdma_link_layer (*get_link_layer)(struct ib_device *, u8 ); struct net_device * (*get_netdev)(struct ib_device *, u8 ); int (*query_gid)(struct ib_device *, u8 , int, union ib_gid *); int (*add_gid)(struct ib_device *, u8 , unsigned int, const union ib_gid *, const struct ib_gid_attr *, void **); int (*del_gid)(struct ib_device *, u8 , unsigned int, void **); int (*query_pkey)(struct ib_device *, u8 , u16 , u16 *); int (*modify_device)(struct ib_device *, int, struct ib_device_modify *); int (*modify_port)(struct ib_device *, u8 , int, struct ib_port_modify *); struct ib_ucontext * (*alloc_ucontext)(struct ib_device *, struct ib_udata *); int (*dealloc_ucontext)(struct ib_ucontext *); int (*mmap)(struct 
ib_ucontext *, struct vm_area_struct *); struct ib_pd * (*alloc_pd)(struct ib_device *, struct ib_ucontext *, struct ib_udata *); int (*dealloc_pd)(struct ib_pd *); struct ib_ah * (*create_ah)(struct ib_pd *, struct ib_ah_attr *); int (*modify_ah)(struct ib_ah *, struct ib_ah_attr *); int (*query_ah)(struct ib_ah *, struct ib_ah_attr *); int (*destroy_ah)(struct ib_ah *); struct ib_srq * (*create_srq)(struct ib_pd *, struct ib_srq_init_attr *, struct ib_udata *); int (*modify_srq)(struct ib_srq *, struct ib_srq_attr *, enum ib_srq_attr_mask , struct ib_udata *); int (*query_srq)(struct ib_srq *, struct ib_srq_attr *); int (*destroy_srq)(struct ib_srq *); int (*post_srq_recv)(struct ib_srq *, struct ib_recv_wr *, struct ib_recv_wr **); struct ib_qp * (*create_qp)(struct ib_pd *, struct ib_qp_init_attr *, struct ib_udata *); int (*modify_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); int (*query_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_qp_init_attr *); int (*destroy_qp)(struct ib_qp *); int (*post_send)(struct ib_qp *, struct ib_send_wr *, struct ib_send_wr **); int (*post_recv)(struct ib_qp *, struct ib_recv_wr *, struct ib_recv_wr **); struct ib_cq * (*create_cq)(struct ib_device *, const struct ib_cq_init_attr *, struct ib_ucontext *, struct ib_udata *); int (*modify_cq)(struct ib_cq *, u16 , u16 ); int (*destroy_cq)(struct ib_cq *); int (*resize_cq)(struct ib_cq *, int, struct ib_udata *); int (*poll_cq)(struct ib_cq *, int, struct ib_wc *); int (*peek_cq)(struct ib_cq *, int); int (*req_notify_cq)(struct ib_cq *, enum ib_cq_notify_flags ); int (*req_ncomp_notif)(struct ib_cq *, int); struct ib_mr * (*get_dma_mr)(struct ib_pd *, int); struct ib_mr * (*reg_user_mr)(struct ib_pd *, u64 , u64 , u64 , int, struct ib_udata *); int (*rereg_user_mr)(struct ib_mr *, int, u64 , u64 , u64 , int, struct ib_pd *, struct ib_udata *); int (*dereg_mr)(struct ib_mr *); struct ib_mr * (*alloc_mr)(struct ib_pd *, enum ib_mr_type , u32 ); int 
(*map_mr_sg)(struct ib_mr *, struct scatterlist *, int, unsigned int *); struct ib_mw * (*alloc_mw)(struct ib_pd *, enum ib_mw_type , struct ib_udata *); int (*dealloc_mw)(struct ib_mw *); struct ib_fmr * (*alloc_fmr)(struct ib_pd *, int, struct ib_fmr_attr *); int (*map_phys_fmr)(struct ib_fmr *, u64 *, int, u64 ); int (*unmap_fmr)(struct list_head *); int (*dealloc_fmr)(struct ib_fmr *); int (*attach_mcast)(struct ib_qp *, union ib_gid *, u16 ); int (*detach_mcast)(struct ib_qp *, union ib_gid *, u16 ); int (*process_mad)(struct ib_device *, int, u8 , const struct ib_wc *, const struct ib_grh *, const struct ib_mad_hdr *, size_t , struct ib_mad_hdr *, size_t *, u16 *); struct ib_xrcd * (*alloc_xrcd)(struct ib_device *, struct ib_ucontext *, struct ib_udata *); int (*dealloc_xrcd)(struct ib_xrcd *); struct ib_flow * (*create_flow)(struct ib_qp *, struct ib_flow_attr *, int); int (*destroy_flow)(struct ib_flow *); int (*check_mr_status)(struct ib_mr *, u32 , struct ib_mr_status *); void (*disassociate_ucontext)(struct ib_ucontext *); void (*drain_rq)(struct ib_qp *); void (*drain_sq)(struct ib_qp *); int (*set_vf_link_state)(struct ib_device *, int, u8 , int); int (*get_vf_config)(struct ib_device *, int, u8 , struct ifla_vf_info *); int (*get_vf_stats)(struct ib_device *, int, u8 , struct ifla_vf_stats *); int (*set_vf_guid)(struct ib_device *, int, u8 , u64 , int); struct ib_wq * (*create_wq)(struct ib_pd *, struct ib_wq_init_attr *, struct ib_udata *); int (*destroy_wq)(struct ib_wq *); int (*modify_wq)(struct ib_wq *, struct ib_wq_attr *, u32 , struct ib_udata *); struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *, struct ib_rwq_ind_table_init_attr *, struct ib_udata *); int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *); struct ib_dma_mapping_ops *dma_ops; struct module *owner; struct device dev; struct kobject *ports_parent; struct list_head port_list; enum ldv_40106 reg_state; int uverbs_abi_ver; u64 uverbs_cmd_mask; u64 
uverbs_ex_cmd_mask; char node_desc[64U]; __be64 node_guid; u32 local_dma_lkey; unsigned char is_switch; u8 node_type; u8 phys_port_cnt; struct ib_device_attr attrs; struct attribute_group *hw_stats_ag; struct rdma_hw_stats *hw_stats; int (*get_port_immutable)(struct ib_device *, u8 , struct ib_port_immutable *); void (*get_dev_fw_str)(struct ib_device *, char *, size_t ); } ; 3299 struct ipv4_devconf { void *sysctl; int data[31U]; unsigned long state[1U]; } ; 20 struct ip_mc_list ; 20 struct in_device { struct net_device *dev; atomic_t refcnt; int dead; struct in_ifaddr *ifa_list; struct ip_mc_list *mc_list; struct ip_mc_list **mc_hash; int mc_count; spinlock_t mc_tomb_lock; struct ip_mc_list *mc_tomb; unsigned long mr_v1_seen; unsigned long mr_v2_seen; unsigned long mr_maxdelay; unsigned char mr_qrv; unsigned char mr_gq_running; unsigned char mr_ifc_count; struct timer_list mr_gq_timer; struct timer_list mr_ifc_timer; struct neigh_parms *arp_parms; struct ipv4_devconf cnf; struct callback_head callback_head; } ; 71 struct in_ifaddr { struct hlist_node hash; struct in_ifaddr *ifa_next; struct in_device *ifa_dev; struct callback_head callback_head; __be32 ifa_local; __be32 ifa_address; __be32 ifa_mask; __be32 ifa_broadcast; unsigned char ifa_scope; unsigned char ifa_prefixlen; __u32 ifa_flags; char ifa_label[16U]; __u32 ifa_valid_lft; __u32 ifa_preferred_lft; unsigned long ifa_cstamp; unsigned long ifa_tstamp; } ; 67 struct rdma_dev_addr { unsigned char src_dev_addr[32U]; unsigned char dst_dev_addr[32U]; unsigned char broadcast[32U]; unsigned short dev_type; int bound_dev_if; enum rdma_transport_type transport; struct net *net; enum rdma_network_type network; int hoplimit; } ; 249 struct ib_mad_hdr { u8 base_version; u8 mgmt_class; u8 class_version; u8 method; __be16 status; __be16 class_specific; __be64 tid; __be16 attr_id; __be16 resv; __be32 attr_mod; } ; 899 struct ib_sa_path_rec { __be64 service_id; union ib_gid dgid; union ib_gid sgid; __be16 dlid; __be16 
slid; int raw_traffic; __be32 flow_label; u8 hop_limit; u8 traffic_class; int reversible; u8 numb_path; __be16 pkey; __be16 qos_class; u8 sl; u8 mtu_selector; u8 mtu; u8 rate_selector; u8 rate; u8 packet_life_time_selector; u8 packet_life_time; u8 preference; u8 dmac[6U]; int ifindex; struct net *net; enum ib_gid_type gid_type; } ; 459 enum rdma_cm_event_type { RDMA_CM_EVENT_ADDR_RESOLVED = 0, RDMA_CM_EVENT_ADDR_ERROR = 1, RDMA_CM_EVENT_ROUTE_RESOLVED = 2, RDMA_CM_EVENT_ROUTE_ERROR = 3, RDMA_CM_EVENT_CONNECT_REQUEST = 4, RDMA_CM_EVENT_CONNECT_RESPONSE = 5, RDMA_CM_EVENT_CONNECT_ERROR = 6, RDMA_CM_EVENT_UNREACHABLE = 7, RDMA_CM_EVENT_REJECTED = 8, RDMA_CM_EVENT_ESTABLISHED = 9, RDMA_CM_EVENT_DISCONNECTED = 10, RDMA_CM_EVENT_DEVICE_REMOVAL = 11, RDMA_CM_EVENT_MULTICAST_JOIN = 12, RDMA_CM_EVENT_MULTICAST_ERROR = 13, RDMA_CM_EVENT_ADDR_CHANGE = 14, RDMA_CM_EVENT_TIMEWAIT_EXIT = 15 } ; 66 enum rdma_port_space { RDMA_PS_SDP = 1, RDMA_PS_IPOIB = 2, RDMA_PS_IB = 319, RDMA_PS_TCP = 262, RDMA_PS_UDP = 273 } ; 74 struct rdma_addr { struct __kernel_sockaddr_storage src_addr; struct __kernel_sockaddr_storage dst_addr; struct rdma_dev_addr dev_addr; } ; 85 struct rdma_route { struct rdma_addr addr; struct ib_sa_path_rec *path_rec; int num_paths; } ; 91 struct rdma_conn_param { const void *private_data; u8 private_data_len; u8 responder_resources; u8 initiator_depth; u8 flow_control; u8 retry_count; u8 rnr_retry_count; u8 srq; u32 qp_num; u32 qkey; } ; 105 struct rdma_ud_param { const void *private_data; u8 private_data_len; struct ib_ah_attr ah_attr; u32 qp_num; u32 qkey; } ; 113 union __anonunion_param_626 { struct rdma_conn_param conn; struct rdma_ud_param ud; } ; 113 struct rdma_cm_event { enum rdma_cm_event_type event; int status; union __anonunion_param_626 param; } ; 136 struct rdma_cm_id ; 147 struct rdma_cm_id { struct ib_device *device; void *context; struct ib_qp *qp; int (*event_handler)(struct rdma_cm_id *, struct rdma_cm_event *); struct rdma_route route; enum 
rdma_port_space ps; enum ib_qp_type qp_type; u8 port_num; } ; 390 struct configfs_item_operations ; 391 struct configfs_group_operations ; 392 struct configfs_attribute ; 393 struct configfs_bin_attribute ; 394 struct configfs_subsystem ; 395 struct config_group ; 395 struct config_item_type ; 395 struct config_item { char *ci_name; char ci_namebuf[20U]; struct kref ci_kref; struct list_head ci_entry; struct config_item *ci_parent; struct config_group *ci_group; struct config_item_type *ci_type; struct dentry *ci_dentry; } ; 82 struct config_item_type { struct module *ct_owner; struct configfs_item_operations *ct_item_ops; struct configfs_group_operations *ct_group_ops; struct configfs_attribute **ct_attrs; struct configfs_bin_attribute **ct_bin_attrs; } ; 90 struct config_group { struct config_item cg_item; struct list_head cg_children; struct configfs_subsystem *cg_subsys; struct list_head default_groups; struct list_head group_entry; } ; 132 struct configfs_attribute { const char *ca_name; struct module *ca_owner; umode_t ca_mode; ssize_t (*show)(struct config_item *, char *); ssize_t (*store)(struct config_item *, const char *, size_t ); } ; 140 struct configfs_bin_attribute { struct configfs_attribute cb_attr; void *cb_private; size_t cb_max_size; ssize_t (*read)(struct config_item *, void *, size_t ); ssize_t (*write)(struct config_item *, const void *, size_t ); } ; 176 struct configfs_item_operations { void (*release)(struct config_item *); int (*allow_link)(struct config_item *, struct config_item *); int (*drop_link)(struct config_item *, struct config_item *); } ; 233 struct configfs_group_operations { struct config_item * (*make_item)(struct config_group *, const char *); struct config_group * (*make_group)(struct config_group *, const char *); int (*commit_item)(struct config_item *); void (*disconnect_notify)(struct config_group *, struct config_item *); void (*drop_item)(struct config_group *, struct config_item *); } ; 241 struct configfs_subsystem 
{ struct config_group su_group; struct mutex su_mutex; } ; 82 struct disk_stats { unsigned long sectors[2U]; unsigned long ios[2U]; unsigned long merges[2U]; unsigned long ticks[2U]; unsigned long io_ticks; unsigned long time_in_queue; } ; 91 struct partition_meta_info { char uuid[37U]; u8 volname[64U]; } ; 103 struct hd_struct { sector_t start_sect; sector_t nr_sects; seqcount_t nr_sects_seq; sector_t alignment_offset; unsigned int discard_alignment; struct device __dev; struct kobject *holder_dir; int policy; int partno; struct partition_meta_info *info; int make_it_fail; unsigned long stamp; atomic_t in_flight[2U]; struct disk_stats *dkstats; struct percpu_ref ref; struct callback_head callback_head; } ; 157 struct disk_part_tbl { struct callback_head callback_head; int len; struct hd_struct *last_lookup; struct hd_struct *part[]; } ; 164 struct disk_events ; 165 struct badblocks ; 166 struct blk_integrity_profile ; 166 struct blk_integrity { struct blk_integrity_profile *profile; unsigned char flags; unsigned char tuple_size; unsigned char interval_exp; unsigned char tag_size; } ; 177 struct timer_rand_state ; 177 struct gendisk { int major; int first_minor; int minors; char disk_name[32U]; char * (*devnode)(struct gendisk *, umode_t *); unsigned int events; unsigned int async_events; struct disk_part_tbl *part_tbl; struct hd_struct part0; const struct block_device_operations *fops; struct request_queue *queue; void *private_data; int flags; struct kobject *slave_dir; struct timer_rand_state *random; atomic_t sync_io; struct disk_events *ev; struct kobject integrity_kobj; int node_id; struct badblocks *bb; } ; 64 struct bsg_class_device { struct device *class_dev; struct device *parent; int minor; struct request_queue *queue; struct kref ref; void (*release)(struct device *); } ; 22 struct elevator_queue ; 24 struct request ; 26 struct bsg_job ; 27 struct blkcg_gq ; 28 struct blk_flush_queue ; 29 struct pr_ops ; 50 typedef void rq_end_io_fn(struct request *, 
int); 51 struct request_list { struct request_queue *q; struct blkcg_gq *blkg; int count[2U]; int starved[2U]; mempool_t *rq_pool; wait_queue_head_t wait[2U]; unsigned int flags; } ; 76 union __anonunion____missing_field_name_627 { struct call_single_data csd; u64 fifo_time; } ; 76 struct blk_mq_ctx ; 76 union __anonunion____missing_field_name_628 { struct hlist_node hash; struct list_head ipi_list; } ; 76 union __anonunion____missing_field_name_629 { struct rb_node rb_node; void *completion_data; } ; 76 struct __anonstruct_elv_631 { struct io_cq *icq; void *priv[2U]; } ; 76 struct __anonstruct_flush_632 { unsigned int seq; struct list_head list; rq_end_io_fn *saved_end_io; } ; 76 union __anonunion____missing_field_name_630 { struct __anonstruct_elv_631 elv; struct __anonstruct_flush_632 flush; } ; 76 struct request { struct list_head queuelist; union __anonunion____missing_field_name_627 __annonCompField146; struct request_queue *q; struct blk_mq_ctx *mq_ctx; int cpu; unsigned int cmd_type; u64 cmd_flags; unsigned long atomic_flags; unsigned int __data_len; sector_t __sector; struct bio *bio; struct bio *biotail; union __anonunion____missing_field_name_628 __annonCompField147; union __anonunion____missing_field_name_629 __annonCompField148; union __anonunion____missing_field_name_630 __annonCompField149; struct gendisk *rq_disk; struct hd_struct *part; unsigned long start_time; struct request_list *rl; unsigned long long start_time_ns; unsigned long long io_start_time_ns; unsigned short nr_phys_segments; unsigned short nr_integrity_segments; unsigned short ioprio; void *special; int tag; int errors; unsigned char __cmd[16U]; unsigned char *cmd; unsigned short cmd_len; unsigned int extra_len; unsigned int sense_len; unsigned int resid_len; void *sense; unsigned long deadline; struct list_head timeout_list; unsigned int timeout; int retries; rq_end_io_fn *end_io; void *end_io_data; struct request *next_rq; } ; 117 struct elevator_type ; 12 typedef int 
elevator_merge_fn(struct request_queue *, struct request **, struct bio *); 15 typedef void elevator_merge_req_fn(struct request_queue *, struct request *, struct request *); 17 typedef void elevator_merged_fn(struct request_queue *, struct request *, int); 19 typedef int elevator_allow_bio_merge_fn(struct request_queue *, struct request *, struct bio *); 22 typedef int elevator_allow_rq_merge_fn(struct request_queue *, struct request *, struct request *); 25 typedef void elevator_bio_merged_fn(struct request_queue *, struct request *, struct bio *); 28 typedef int elevator_dispatch_fn(struct request_queue *, int); 30 typedef void elevator_add_req_fn(struct request_queue *, struct request *); 31 typedef struct request * elevator_request_list_fn(struct request_queue *, struct request *); 32 typedef void elevator_completed_req_fn(struct request_queue *, struct request *); 33 typedef int elevator_may_queue_fn(struct request_queue *, int, int); 35 typedef void elevator_init_icq_fn(struct io_cq *); 36 typedef void elevator_exit_icq_fn(struct io_cq *); 37 typedef int elevator_set_req_fn(struct request_queue *, struct request *, struct bio *, gfp_t ); 39 typedef void elevator_put_req_fn(struct request *); 40 typedef void elevator_activate_req_fn(struct request_queue *, struct request *); 41 typedef void elevator_deactivate_req_fn(struct request_queue *, struct request *); 43 typedef int elevator_init_fn(struct request_queue *, struct elevator_type *); 45 typedef void elevator_exit_fn(struct elevator_queue *); 46 typedef void elevator_registered_fn(struct request_queue *); 47 struct elevator_ops { elevator_merge_fn *elevator_merge_fn; elevator_merged_fn *elevator_merged_fn; elevator_merge_req_fn *elevator_merge_req_fn; elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn; elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn; elevator_bio_merged_fn *elevator_bio_merged_fn; elevator_dispatch_fn *elevator_dispatch_fn; elevator_add_req_fn *elevator_add_req_fn; 
elevator_activate_req_fn *elevator_activate_req_fn; elevator_deactivate_req_fn *elevator_deactivate_req_fn; elevator_completed_req_fn *elevator_completed_req_fn; elevator_request_list_fn *elevator_former_req_fn; elevator_request_list_fn *elevator_latter_req_fn; elevator_init_icq_fn *elevator_init_icq_fn; elevator_exit_icq_fn *elevator_exit_icq_fn; elevator_set_req_fn *elevator_set_req_fn; elevator_put_req_fn *elevator_put_req_fn; elevator_may_queue_fn *elevator_may_queue_fn; elevator_init_fn *elevator_init_fn; elevator_exit_fn *elevator_exit_fn; elevator_registered_fn *elevator_registered_fn; } ; 79 struct elv_fs_entry { struct attribute attr; ssize_t (*show)(struct elevator_queue *, char *); ssize_t (*store)(struct elevator_queue *, const char *, size_t ); } ; 87 struct elevator_type { struct kmem_cache *icq_cache; struct elevator_ops ops; size_t icq_size; size_t icq_align; struct elv_fs_entry *elevator_attrs; char elevator_name[16U]; struct module *elevator_owner; char icq_cache_name[21U]; struct list_head list; } ; 108 struct elevator_queue { struct elevator_type *type; void *elevator_data; struct kobject kobj; struct mutex sysfs_lock; unsigned char registered; struct hlist_head hash[64U]; } ; 224 typedef void request_fn_proc(struct request_queue *); 225 typedef blk_qc_t make_request_fn(struct request_queue *, struct bio *); 226 typedef int prep_rq_fn(struct request_queue *, struct request *); 227 typedef void unprep_rq_fn(struct request_queue *, struct request *); 230 typedef void softirq_done_fn(struct request *); 231 typedef int dma_drain_needed_fn(struct request *); 232 typedef int lld_busy_fn(struct request_queue *); 233 typedef int bsg_job_fn(struct bsg_job *); 234 enum blk_eh_timer_return { BLK_EH_NOT_HANDLED = 0, BLK_EH_HANDLED = 1, BLK_EH_RESET_TIMER = 2 } ; 241 typedef enum blk_eh_timer_return rq_timed_out_fn(struct request *); 247 struct blk_queue_tag { struct request **tag_index; unsigned long *tag_map; int busy; int max_depth; int real_max_depth; 
atomic_t refcnt; int alloc_policy; int next_tag; } ; 258 struct queue_limits { unsigned long bounce_pfn; unsigned long seg_boundary_mask; unsigned long virt_boundary_mask; unsigned int max_hw_sectors; unsigned int max_dev_sectors; unsigned int chunk_sectors; unsigned int max_sectors; unsigned int max_segment_size; unsigned int physical_block_size; unsigned int alignment_offset; unsigned int io_min; unsigned int io_opt; unsigned int max_discard_sectors; unsigned int max_hw_discard_sectors; unsigned int max_write_same_sectors; unsigned int discard_granularity; unsigned int discard_alignment; unsigned short logical_block_size; unsigned short max_segments; unsigned short max_integrity_segments; unsigned char misaligned; unsigned char discard_misaligned; unsigned char cluster; unsigned char discard_zeroes_data; unsigned char raid_partial_stripes_expensive; } ; 294 struct blk_mq_ops ; 294 struct blk_mq_hw_ctx ; 294 struct throtl_data ; 294 struct blk_mq_tag_set ; 294 struct request_queue { struct list_head queue_head; struct request *last_merge; struct elevator_queue *elevator; int nr_rqs[2U]; int nr_rqs_elvpriv; struct request_list root_rl; request_fn_proc *request_fn; make_request_fn *make_request_fn; prep_rq_fn *prep_rq_fn; unprep_rq_fn *unprep_rq_fn; softirq_done_fn *softirq_done_fn; rq_timed_out_fn *rq_timed_out_fn; dma_drain_needed_fn *dma_drain_needed; lld_busy_fn *lld_busy_fn; struct blk_mq_ops *mq_ops; unsigned int *mq_map; struct blk_mq_ctx *queue_ctx; unsigned int nr_queues; struct blk_mq_hw_ctx **queue_hw_ctx; unsigned int nr_hw_queues; sector_t end_sector; struct request *boundary_rq; struct delayed_work delay_work; struct backing_dev_info backing_dev_info; void *queuedata; unsigned long queue_flags; int id; gfp_t bounce_gfp; spinlock_t __queue_lock; spinlock_t *queue_lock; struct kobject kobj; struct kobject mq_kobj; struct blk_integrity integrity; struct device *dev; int rpm_status; unsigned int nr_pending; unsigned long nr_requests; unsigned int 
nr_congestion_on; unsigned int nr_congestion_off; unsigned int nr_batching; unsigned int dma_drain_size; void *dma_drain_buffer; unsigned int dma_pad_mask; unsigned int dma_alignment; struct blk_queue_tag *queue_tags; struct list_head tag_busy_list; unsigned int nr_sorted; unsigned int in_flight[2U]; unsigned int request_fn_active; unsigned int rq_timeout; struct timer_list timeout; struct work_struct timeout_work; struct list_head timeout_list; struct list_head icq_list; unsigned long blkcg_pols[1U]; struct blkcg_gq *root_blkg; struct list_head blkg_list; struct queue_limits limits; unsigned int sg_timeout; unsigned int sg_reserved_size; int node; struct blk_flush_queue *fq; struct list_head requeue_list; spinlock_t requeue_lock; struct work_struct requeue_work; struct mutex sysfs_lock; int bypass_depth; atomic_t mq_freeze_depth; bsg_job_fn *bsg_job_fn; int bsg_job_size; struct bsg_class_device bsg_dev; struct throtl_data *td; struct callback_head callback_head; wait_queue_head_t mq_freeze_wq; struct percpu_ref q_usage_counter; struct list_head all_q_node; struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; struct bio_set *bio_split; bool mq_sysfs_init_done; } ; 1048 struct blk_plug { struct list_head list; struct list_head mq_list; struct list_head cb_list; } ; 1482 struct blk_integrity_iter { void *prot_buf; void *data_buf; sector_t seed; unsigned int data_size; unsigned short interval; const char *disk_name; } ; 1511 typedef int integrity_processing_fn(struct blk_integrity_iter *); 1512 struct blk_integrity_profile { integrity_processing_fn *generate_fn; integrity_processing_fn *verify_fn; const char *name; } ; 1671 struct block_device_operations { int (*open)(struct block_device *, fmode_t ); void (*release)(struct gendisk *, fmode_t ); int (*rw_page)(struct block_device *, sector_t , struct page *, bool ); int (*ioctl)(struct block_device *, fmode_t , unsigned int, unsigned long); int (*compat_ioctl)(struct block_device *, fmode_t , unsigned int, 
unsigned long); long int (*direct_access)(struct block_device *, sector_t , void **, pfn_t *, long); unsigned int (*check_events)(struct gendisk *, unsigned int); int (*media_changed)(struct gendisk *); void (*unlock_native_capacity)(struct gendisk *); int (*revalidate_disk)(struct gendisk *); int (*getgeo)(struct block_device *, struct hd_geometry *); void (*swap_slot_free_notify)(struct block_device *, unsigned long); struct module *owner; const struct pr_ops *pr_ops; } ; 1701 struct percpu_ida_cpu ; 1702 struct __anonstruct____missing_field_name_634 { spinlock_t lock; unsigned int cpu_last_stolen; wait_queue_head_t wait; unsigned int nr_free; unsigned int *freelist; } ; 1702 struct percpu_ida { unsigned int nr_tags; unsigned int percpu_max_size; unsigned int percpu_batch_size; struct percpu_ida_cpu *tag_cpu; cpumask_t cpus_have_tags; struct __anonstruct____missing_field_name_634 __annonCompField150; } ; 19 struct inet_ehash_bucket { struct hlist_nulls_head chain; } ; 92 struct inet_bind_hashbucket { spinlock_t lock; struct hlist_head chain; } ; 100 struct inet_listen_hashbucket { spinlock_t lock; struct hlist_head head; } ; 108 struct inet_hashinfo { struct inet_ehash_bucket *ehash; spinlock_t *ehash_locks; unsigned int ehash_mask; unsigned int ehash_locks_mask; struct inet_bind_hashbucket *bhash; unsigned int bhash_size; struct kmem_cache *bind_bucket_cachep; struct inet_listen_hashbucket listening_hash[32U]; } ; 1382 struct ahash_request ; 1883 enum transport_state_table { TRANSPORT_NO_STATE = 0, TRANSPORT_NEW_CMD = 1, TRANSPORT_WRITE_PENDING = 3, TRANSPORT_PROCESSING = 5, TRANSPORT_COMPLETE = 6, TRANSPORT_ISTATE_PROCESSING = 11, TRANSPORT_COMPLETE_QF_WP = 18, TRANSPORT_COMPLETE_QF_OK = 19 } ; 152 typedef unsigned int sense_reason_t; 220 struct se_cmd ; 235 struct se_device ; 235 struct t10_alua_tg_pt_gp ; 235 struct t10_alua { u16 alua_tg_pt_gps_counter; u32 alua_tg_pt_gps_count; spinlock_t lba_map_lock; u32 lba_map_segment_size; u32 
lba_map_segment_multiplier; struct list_head lba_map_list; spinlock_t tg_pt_gps_lock; struct se_device *t10_dev; struct t10_alua_tg_pt_gp *default_tg_pt_gp; struct config_group alua_tg_pt_gps_group; struct list_head tg_pt_gps_list; } ; 253 struct t10_alua_lu_gp { u16 lu_gp_id; int lu_gp_valid_id; u32 lu_gp_members; atomic_t lu_gp_ref_cnt; spinlock_t lu_gp_lock; struct config_group lu_gp_group; struct list_head lu_gp_node; struct list_head lu_gp_mem_list; } ; 264 struct t10_alua_lu_gp_member { bool lu_gp_assoc; atomic_t lu_gp_mem_ref_cnt; spinlock_t lu_gp_mem_lock; struct t10_alua_lu_gp *lu_gp; struct se_device *lu_gp_mem_dev; struct list_head lu_gp_mem_list; } ; 273 struct se_lun ; 273 struct se_node_acl ; 273 struct t10_alua_tg_pt_gp { u16 tg_pt_gp_id; int tg_pt_gp_valid_id; int tg_pt_gp_alua_supported_states; int tg_pt_gp_alua_pending_state; int tg_pt_gp_alua_previous_state; int tg_pt_gp_alua_access_status; int tg_pt_gp_alua_access_type; int tg_pt_gp_nonop_delay_msecs; int tg_pt_gp_trans_delay_msecs; int tg_pt_gp_implicit_trans_secs; int tg_pt_gp_pref; int tg_pt_gp_write_metadata; u32 tg_pt_gp_members; atomic_t tg_pt_gp_alua_access_state; atomic_t tg_pt_gp_ref_cnt; spinlock_t tg_pt_gp_lock; struct mutex tg_pt_gp_md_mutex; struct se_device *tg_pt_gp_dev; struct config_group tg_pt_gp_group; struct list_head tg_pt_gp_list; struct list_head tg_pt_gp_lun_list; struct se_lun *tg_pt_gp_alua_lun; struct se_node_acl *tg_pt_gp_alua_nacl; struct delayed_work tg_pt_gp_transition_work; struct completion *tg_pt_gp_transition_complete; } ; 311 struct t10_wwn { char vendor[8U]; char model[16U]; char revision[4U]; char unit_serial[254U]; spinlock_t t10_vpd_lock; struct se_device *t10_dev; struct config_group t10_wwn_group; struct list_head t10_vpd_list; } ; 322 struct se_dev_entry ; 322 struct t10_pr_registration { char pr_reg_isid[16U]; unsigned char pr_iport[256U]; unsigned char pr_tport[256U]; u16 pr_aptpl_rpti; u16 pr_reg_tpgt; int pr_reg_all_tg_pt; int pr_reg_aptpl; int 
pr_res_holder; int pr_res_type; int pr_res_scope; bool isid_present_at_reg; u64 pr_res_mapped_lun; u64 pr_aptpl_target_lun; u16 tg_pt_sep_rtpi; u32 pr_res_generation; u64 pr_reg_bin_isid; u64 pr_res_key; atomic_t pr_res_holders; struct se_node_acl *pr_reg_nacl; struct se_dev_entry *pr_reg_deve; struct list_head pr_reg_list; struct list_head pr_reg_abort_list; struct list_head pr_reg_aptpl_list; struct list_head pr_reg_atp_list; struct list_head pr_reg_atp_mem_list; } ; 362 struct t10_reservation { int pr_all_tg_pt; int pr_aptpl_active; u32 pr_generation; spinlock_t registration_lock; spinlock_t aptpl_reg_lock; struct se_node_acl *pr_res_holder; struct list_head registration_list; struct list_head aptpl_reg_list; } ; 387 struct se_tmr_req { u8 function; u8 response; int call_transport; u64 ref_task_tag; void *fabric_tmr_ptr; struct se_cmd *task_cmd; struct se_device *tmr_dev; struct se_lun *tmr_lun; struct list_head tmr_list; } ; 402 enum target_prot_op { TARGET_PROT_NORMAL = 0, TARGET_PROT_DIN_INSERT = 1, TARGET_PROT_DOUT_INSERT = 2, TARGET_PROT_DIN_STRIP = 4, TARGET_PROT_DOUT_STRIP = 8, TARGET_PROT_DIN_PASS = 16, TARGET_PROT_DOUT_PASS = 32 } ; 412 enum target_prot_type { TARGET_DIF_TYPE0_PROT = 0, TARGET_DIF_TYPE1_PROT = 1, TARGET_DIF_TYPE2_PROT = 2, TARGET_DIF_TYPE3_PROT = 3 } ; 425 struct se_session ; 425 struct target_core_fabric_ops ; 425 struct se_cmd { u8 scsi_status; u8 scsi_asc; u8 scsi_ascq; u16 scsi_sense_length; unsigned char cmd_wait_set; unsigned char unknown_data_length; bool state_active; u64 tag; int alua_nonop_delay; enum dma_data_direction data_direction; int sam_task_attr; unsigned int map_tag; enum transport_state_table t_state; u32 se_cmd_flags; u32 data_length; u32 residual_count; u64 orig_fe_lun; u64 pr_res_key; void *sense_buffer; struct list_head se_delayed_node; struct list_head se_qf_node; struct se_device *se_dev; struct se_lun *se_lun; struct se_session *se_sess; struct se_tmr_req *se_tmr_req; struct list_head se_cmd_list; struct 
completion cmd_wait_comp; const struct target_core_fabric_ops *se_tfo; sense_reason_t (*execute_cmd)(struct se_cmd *); sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool , int *); void *protocol_data; unsigned char *t_task_cdb; unsigned char __t_task_cdb[32U]; unsigned long long t_task_lba; unsigned int t_task_nolb; unsigned int transport_state; spinlock_t t_state_lock; struct kref cmd_kref; struct completion t_transport_stop_comp; struct work_struct work; struct scatterlist *t_data_sg; struct scatterlist *t_data_sg_orig; unsigned int t_data_nents; unsigned int t_data_nents_orig; void *t_data_vmap; struct scatterlist *t_bidi_data_sg; unsigned int t_bidi_data_nents; int lun_ref_active; struct list_head state_list; void *priv; enum target_prot_op prot_op; enum target_prot_type prot_type; u8 prot_checks; bool prot_pto; u32 prot_length; u32 reftag_seed; struct scatterlist *t_prot_sg; unsigned int t_prot_nents; sense_reason_t pi_err; sector_t bad_sector; int cpuid; } ; 535 struct se_portal_group ; 535 struct se_node_acl { char initiatorname[224U]; bool dynamic_node_acl; u32 queue_depth; u32 acl_index; enum target_prot_type saved_prot_type; char acl_tag[64U]; atomic_t acl_pr_ref_count; struct hlist_head lun_entry_hlist; struct se_session *nacl_sess; struct se_portal_group *se_tpg; struct mutex lun_entry_mutex; spinlock_t nacl_sess_lock; struct config_group acl_group; struct config_group acl_attrib_group; struct config_group acl_auth_group; struct config_group acl_param_group; struct config_group acl_fabric_stat_group; struct list_head acl_list; struct list_head acl_sess_list; struct completion acl_free_comp; struct kref acl_kref; } ; 592 struct se_session { unsigned char sess_tearing_down; u64 sess_bin_isid; enum target_prot_op sup_prot_ops; enum target_prot_type sess_prot_type; struct se_node_acl *se_node_acl; struct se_portal_group *se_tpg; void *fabric_sess_ptr; struct list_head sess_list; struct list_head sess_acl_list; struct list_head 
sess_cmd_list; struct list_head sess_wait_list; spinlock_t sess_cmd_lock; void *sess_cmd_map; struct percpu_ida sess_tag_pool; } ; 610 struct se_ml_stat_grps { struct config_group stat_group; struct config_group scsi_auth_intr_group; struct config_group scsi_att_intr_port_group; } ; 619 struct se_lun_acl { u64 mapped_lun; struct se_node_acl *se_lun_nacl; struct se_lun *se_lun; struct config_group se_lun_group; struct se_ml_stat_grps ml_stat_grps; } ; 627 struct se_dev_entry { u64 mapped_lun; u64 pr_res_key; u64 creation_time; bool lun_access_ro; u32 attach_count; atomic_long_t total_cmds; atomic_long_t read_bytes; atomic_long_t write_bytes; atomic_t ua_count; struct kref pr_kref; struct completion pr_comp; struct se_lun_acl *se_lun_acl; spinlock_t ua_lock; struct se_lun *se_lun; unsigned long deve_flags; struct list_head alua_port_list; struct list_head lun_link; struct list_head ua_list; struct hlist_node link; struct callback_head callback_head; } ; 652 struct se_dev_attrib { int emulate_model_alias; int emulate_dpo; int emulate_fua_write; int emulate_fua_read; int emulate_write_cache; int emulate_ua_intlck_ctrl; int emulate_tas; int emulate_tpu; int emulate_tpws; int emulate_caw; int emulate_3pc; int pi_prot_format; enum target_prot_type pi_prot_type; enum target_prot_type hw_pi_prot_type; int enforce_pr_isids; int force_pr_aptpl; int is_nonrot; int emulate_rest_reord; int unmap_zeroes_data; u32 hw_block_size; u32 block_size; u32 hw_max_sectors; u32 optimal_sectors; u32 hw_queue_depth; u32 queue_depth; u32 max_unmap_lba_count; u32 max_unmap_block_desc_count; u32 unmap_granularity; u32 unmap_granularity_alignment; u32 max_write_same_len; u32 max_bytes_per_io; struct se_device *da_dev; struct config_group da_group; } ; 688 struct se_port_stat_grps { struct config_group stat_group; struct config_group scsi_port_group; struct config_group scsi_tgt_port_group; struct config_group scsi_transport_group; } ; 695 struct scsi_port_stats { atomic_long_t cmd_pdus; 
atomic_long_t tx_data_octets; atomic_long_t rx_data_octets; } ; 701 struct se_lun { u64 unpacked_lun; u32 lun_link_magic; bool lun_access_ro; u32 lun_index; u16 lun_rtpi; atomic_t lun_acl_count; struct se_device *lun_se_dev; struct list_head lun_deve_list; spinlock_t lun_deve_lock; int lun_tg_pt_secondary_stat; int lun_tg_pt_secondary_write_md; atomic_t lun_tg_pt_secondary_offline; struct mutex lun_tg_pt_md_mutex; struct list_head lun_tg_pt_gp_link; struct t10_alua_tg_pt_gp *lun_tg_pt_gp; spinlock_t lun_tg_pt_gp_lock; struct se_portal_group *lun_tpg; struct scsi_port_stats lun_stats; struct config_group lun_group; struct se_port_stat_grps port_stat_grps; struct completion lun_ref_comp; struct percpu_ref lun_ref; struct list_head lun_dev_link; struct hlist_node link; struct callback_head callback_head; } ; 738 struct se_dev_stat_grps { struct config_group stat_group; struct config_group scsi_dev_group; struct config_group scsi_tgt_dev_group; struct config_group scsi_lu_group; } ; 745 struct se_hba ; 745 struct target_backend_ops ; 745 struct se_device { u32 dev_link_magic; u16 dev_rpti_counter; u32 dev_cur_ordered_id; u32 dev_flags; u32 queue_depth; u64 dev_res_bin_isid; u32 dev_index; u64 creation_time; atomic_long_t num_resets; atomic_long_t num_cmds; atomic_long_t read_bytes; atomic_long_t write_bytes; atomic_t simple_cmds; atomic_t dev_ordered_sync; atomic_t dev_qf_count; u32 export_count; spinlock_t delayed_cmd_lock; spinlock_t execute_task_lock; spinlock_t dev_reservation_lock; unsigned int dev_reservation_flags; spinlock_t se_port_lock; spinlock_t se_tmr_lock; spinlock_t qf_cmd_lock; struct semaphore caw_sem; struct se_node_acl *dev_reserved_node_acl; struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem; struct t10_pr_registration *dev_pr_res_holder; struct list_head dev_sep_list; struct list_head dev_tmr_list; struct workqueue_struct *tmr_wq; struct work_struct qf_work_queue; struct list_head delayed_cmd_list; struct list_head state_list; struct list_head 
qf_cmd_list; struct list_head g_dev_node; struct se_hba *se_hba; struct t10_wwn t10_wwn; struct t10_alua t10_alua; struct t10_reservation t10_pr; struct se_dev_attrib dev_attrib; struct config_group dev_group; struct config_group dev_pr_group; struct se_dev_stat_grps dev_stat_grps; unsigned char dev_alias[512U]; unsigned char udev_path[512U]; const struct target_backend_ops *transport; struct list_head dev_list; struct se_lun xcopy_lun; int prot_length; u32 hba_index; struct callback_head callback_head; } ; 827 struct target_backend ; 827 struct se_hba { u16 hba_tpgt; u32 hba_id; u32 hba_flags; u32 dev_count; u32 hba_index; void *hba_ptr; struct list_head hba_node; spinlock_t device_lock; struct config_group hba_group; struct mutex hba_access_mutex; struct target_backend *backend; } ; 844 struct se_tpg_np { struct se_portal_group *tpg_np_parent; struct config_group tpg_np_group; } ; 855 struct se_wwn ; 855 struct se_portal_group { int proto_id; atomic_t tpg_pr_ref_count; struct mutex acl_node_mutex; spinlock_t session_lock; struct mutex tpg_lun_mutex; struct list_head se_tpg_node; struct list_head acl_node_list; struct hlist_head tpg_lun_hlist; struct se_lun *tpg_virt_lun0; struct list_head tpg_sess_list; const struct target_core_fabric_ops *se_tpg_tfo; struct se_wwn *se_tpg_wwn; struct config_group tpg_group; struct config_group tpg_lun_group; struct config_group tpg_np_group; struct config_group tpg_acl_group; struct config_group tpg_attrib_group; struct config_group tpg_auth_group; struct config_group tpg_param_group; } ; 912 struct target_fabric_configfs ; 912 struct se_wwn { struct target_fabric_configfs *wwn_tf; struct config_group wwn_group; struct config_group fabric_stat_group; } ; 932 struct target_core_fabric_ops { struct module *module; const char *name; size_t node_acl_size; u32 max_data_sg_nents; char * (*get_fabric_name)(); char * (*tpg_get_wwn)(struct se_portal_group *); u16 (*tpg_get_tag)(struct se_portal_group *); u32 
(*tpg_get_default_depth)(struct se_portal_group *); int (*tpg_check_demo_mode)(struct se_portal_group *); int (*tpg_check_demo_mode_cache)(struct se_portal_group *); int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *); int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *); int (*tpg_check_demo_mode_login_only)(struct se_portal_group *); int (*tpg_check_prot_fabric_only)(struct se_portal_group *); u32 (*tpg_get_inst_index)(struct se_portal_group *); int (*check_stop_free)(struct se_cmd *); void (*release_cmd)(struct se_cmd *); void (*close_session)(struct se_session *); u32 (*sess_get_index)(struct se_session *); u32 (*sess_get_initiator_sid)(struct se_session *, unsigned char *, u32 ); int (*write_pending)(struct se_cmd *); int (*write_pending_status)(struct se_cmd *); void (*set_default_node_attributes)(struct se_node_acl *); int (*get_cmd_state)(struct se_cmd *); int (*queue_data_in)(struct se_cmd *); int (*queue_status)(struct se_cmd *); void (*queue_tm_rsp)(struct se_cmd *); void (*aborted_task)(struct se_cmd *); struct se_wwn * (*fabric_make_wwn)(struct target_fabric_configfs *, struct config_group *, const char *); void (*fabric_drop_wwn)(struct se_wwn *); void (*add_wwn_groups)(struct se_wwn *); struct se_portal_group * (*fabric_make_tpg)(struct se_wwn *, struct config_group *, const char *); void (*fabric_drop_tpg)(struct se_portal_group *); int (*fabric_post_link)(struct se_portal_group *, struct se_lun *); void (*fabric_pre_unlink)(struct se_portal_group *, struct se_lun *); struct se_tpg_np * (*fabric_make_np)(struct se_portal_group *, struct config_group *, const char *); void (*fabric_drop_np)(struct se_tpg_np *); int (*fabric_init_nodeacl)(struct se_node_acl *, const char *); struct configfs_attribute **tfc_discovery_attrs; struct configfs_attribute **tfc_wwn_attrs; struct configfs_attribute **tfc_tpg_base_attrs; struct configfs_attribute **tfc_tpg_np_base_attrs; struct configfs_attribute **tfc_tpg_attrib_attrs; struct 
configfs_attribute **tfc_tpg_auth_attrs; struct configfs_attribute **tfc_tpg_param_attrs; struct configfs_attribute **tfc_tpg_nacl_base_attrs; struct configfs_attribute **tfc_tpg_nacl_attrib_attrs; struct configfs_attribute **tfc_tpg_nacl_auth_attrs; struct configfs_attribute **tfc_tpg_nacl_param_attrs; } ; 199 struct scsi_lun { __u8 scsi_lun[8U]; } ; 77 typedef uint32_t itt_t; 78 struct iscsi_hdr { uint8_t opcode; uint8_t flags; uint8_t rsvd2[2U]; uint8_t hlength; uint8_t dlength[3U]; struct scsi_lun lun; itt_t itt; __be32 ttt; __be32 statsn; __be32 exp_statsn; __be32 max_statsn; uint8_t other[12U]; } ; 143 struct iscsi_scsi_req { uint8_t opcode; uint8_t flags; __be16 rsvd2; uint8_t hlength; uint8_t dlength[3U]; struct scsi_lun lun; itt_t itt; __be32 data_length; __be32 cmdsn; __be32 exp_statsn; uint8_t cdb[16U]; } ; 192 struct iscsi_scsi_rsp { uint8_t opcode; uint8_t flags; uint8_t response; uint8_t cmd_status; uint8_t hlength; uint8_t dlength[3U]; uint8_t rsvd[8U]; itt_t itt; __be32 rsvd1; __be32 statsn; __be32 exp_cmdsn; __be32 max_cmdsn; __be32 exp_datasn; __be32 bi_residual_count; __be32 residual_count; } ; 243 struct iscsi_nopout { uint8_t opcode; uint8_t flags; __be16 rsvd2; uint8_t rsvd3; uint8_t dlength[3U]; struct scsi_lun lun; itt_t itt; __be32 ttt; __be32 cmdsn; __be32 exp_statsn; uint8_t rsvd4[16U]; } ; 266 struct iscsi_nopin { uint8_t opcode; uint8_t flags; __be16 rsvd2; uint8_t rsvd3; uint8_t dlength[3U]; struct scsi_lun lun; itt_t itt; __be32 ttt; __be32 statsn; __be32 exp_cmdsn; __be32 max_cmdsn; uint8_t rsvd4[12U]; } ; 299 struct iscsi_tm_rsp { uint8_t opcode; uint8_t flags; uint8_t response; uint8_t qualifier; uint8_t hlength; uint8_t dlength[3U]; uint8_t rsvd2[8U]; itt_t itt; itt_t rtt; __be32 statsn; __be32 exp_cmdsn; __be32 max_cmdsn; uint8_t rsvd3[12U]; } ; 358 struct iscsi_data { uint8_t opcode; uint8_t flags; uint8_t rsvd2[2U]; uint8_t rsvd3; uint8_t dlength[3U]; struct scsi_lun lun; itt_t itt; __be32 ttt; __be32 rsvd4; __be32 exp_statsn; 
__be32 rsvd5; __be32 datasn; __be32 offset; __be32 rsvd6; } ; 396 struct iscsi_text { uint8_t opcode; uint8_t flags; uint8_t rsvd2[2U]; uint8_t hlength; uint8_t dlength[3U]; uint8_t rsvd4[8U]; itt_t itt; __be32 ttt; __be32 cmdsn; __be32 exp_statsn; uint8_t rsvd5[16U]; } ; 417 struct iscsi_text_rsp { uint8_t opcode; uint8_t flags; uint8_t rsvd2[2U]; uint8_t hlength; uint8_t dlength[3U]; uint8_t rsvd4[8U]; itt_t itt; __be32 ttt; __be32 statsn; __be32 exp_cmdsn; __be32 max_cmdsn; uint8_t rsvd5[12U]; } ; 436 struct iscsi_login_req { uint8_t opcode; uint8_t flags; uint8_t max_version; uint8_t min_version; uint8_t hlength; uint8_t dlength[3U]; uint8_t isid[6U]; __be16 tsih; itt_t itt; __be16 cid; __be16 rsvd3; __be32 cmdsn; __be32 exp_statsn; uint8_t rsvd5[16U]; } ; 547 struct iscsi_logout_rsp { uint8_t opcode; uint8_t flags; uint8_t response; uint8_t rsvd2; uint8_t hlength; uint8_t dlength[3U]; uint8_t rsvd3[8U]; itt_t itt; __be32 rsvd4; __be32 statsn; __be32 exp_cmdsn; __be32 max_cmdsn; __be32 rsvd5; __be16 t2wait; __be16 t2retain; __be32 rsvd6; } ; 601 struct iscsi_reject { uint8_t opcode; uint8_t flags; uint8_t reason; uint8_t rsvd2; uint8_t hlength; uint8_t dlength[3U]; uint8_t rsvd3[8U]; __be32 ffffffff; uint8_t rsvd4[4U]; __be32 statsn; __be32 exp_cmdsn; __be32 max_cmdsn; __be32 datasn; uint8_t rsvd5[8U]; } ; 627 enum iscsit_transport_type { ISCSI_TCP = 0, ISCSI_SCTP_TCP = 1, ISCSI_SCTP_UDP = 2, ISCSI_IWARP_TCP = 3, ISCSI_IWARP_SCTP = 4, ISCSI_INFINIBAND = 5, ISCSI_CXGBIT = 6 } ; 660 enum datain_req_comp_table { DATAIN_COMPLETE_NORMAL = 1, DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2, DATAIN_COMPLETE_CONNECTION_RECOVERY = 3 } ; 666 enum datain_req_rec_table { DATAIN_WITHIN_COMMAND_RECOVERY = 1, DATAIN_CONNECTION_RECOVERY = 2 } ; 671 enum tpg_state_table { TPG_STATE_FREE = 0, TPG_STATE_ACTIVE = 1, TPG_STATE_INACTIVE = 2, TPG_STATE_COLD_RESET = 3 } ; 678 enum tiqn_state_table { TIQN_STATE_ACTIVE = 1, TIQN_STATE_SHUTDOWN = 2 } ; 683 enum cmd_flags_table { 
ICF_GOT_LAST_DATAOUT = 1, ICF_GOT_DATACK_SNACK = 2, ICF_NON_IMMEDIATE_UNSOLICITED_DATA = 4, ICF_SENT_LAST_R2T = 8, ICF_WITHIN_COMMAND_RECOVERY = 16, ICF_CONTIG_MEMORY = 32, ICF_ATTACHED_TO_RQUEUE = 64, ICF_OOO_CMDSN = 128, ICF_SENDTARGETS_ALL = 256, ICF_SENDTARGETS_SINGLE = 512 } ; 696 enum cmd_i_state_table { ISTATE_NO_STATE = 0, ISTATE_NEW_CMD = 1, ISTATE_DEFERRED_CMD = 2, ISTATE_UNSOLICITED_DATA = 3, ISTATE_RECEIVE_DATAOUT = 4, ISTATE_RECEIVE_DATAOUT_RECOVERY = 5, ISTATE_RECEIVED_LAST_DATAOUT = 6, ISTATE_WITHIN_DATAOUT_RECOVERY = 7, ISTATE_IN_CONNECTION_RECOVERY = 8, ISTATE_RECEIVED_TASKMGT = 9, ISTATE_SEND_ASYNCMSG = 10, ISTATE_SENT_ASYNCMSG = 11, ISTATE_SEND_DATAIN = 12, ISTATE_SEND_LAST_DATAIN = 13, ISTATE_SENT_LAST_DATAIN = 14, ISTATE_SEND_LOGOUTRSP = 15, ISTATE_SENT_LOGOUTRSP = 16, ISTATE_SEND_NOPIN = 17, ISTATE_SENT_NOPIN = 18, ISTATE_SEND_REJECT = 19, ISTATE_SENT_REJECT = 20, ISTATE_SEND_R2T = 21, ISTATE_SENT_R2T = 22, ISTATE_SEND_R2T_RECOVERY = 23, ISTATE_SENT_R2T_RECOVERY = 24, ISTATE_SEND_LAST_R2T = 25, ISTATE_SENT_LAST_R2T = 26, ISTATE_SEND_LAST_R2T_RECOVERY = 27, ISTATE_SENT_LAST_R2T_RECOVERY = 28, ISTATE_SEND_STATUS = 29, ISTATE_SEND_STATUS_BROKEN_PC = 30, ISTATE_SENT_STATUS = 31, ISTATE_SEND_STATUS_RECOVERY = 32, ISTATE_SENT_STATUS_RECOVERY = 33, ISTATE_SEND_TASKMGTRSP = 34, ISTATE_SENT_TASKMGTRSP = 35, ISTATE_SEND_TEXTRSP = 36, ISTATE_SENT_TEXTRSP = 37, ISTATE_SEND_NOPIN_WANT_RESPONSE = 38, ISTATE_SENT_NOPIN_WANT_RESPONSE = 39, ISTATE_SEND_NOPIN_NO_RESPONSE = 40, ISTATE_REMOVE = 41, ISTATE_FREE = 42 } ; 764 enum naf_flags_table { NAF_USERID_SET = 1, NAF_PASSWORD_SET = 2, NAF_USERID_IN_SET = 4, NAF_PASSWORD_IN_SET = 8 } ; 771 enum iscsi_timer_flags_table { ISCSI_TF_RUNNING = 1, ISCSI_TF_STOP = 2, ISCSI_TF_EXPIRED = 4 } ; 777 enum np_flags_table { NPF_IP_NETWORK = 0 } ; 781 enum np_thread_state_table { ISCSI_NP_THREAD_ACTIVE = 1, ISCSI_NP_THREAD_INACTIVE = 2, ISCSI_NP_THREAD_RESET = 3, ISCSI_NP_THREAD_SHUTDOWN = 4, ISCSI_NP_THREAD_EXIT = 5 } ; 789 
struct iscsi_conn_ops { u8 HeaderDigest; u8 DataDigest; u32 MaxRecvDataSegmentLength; u32 MaxXmitDataSegmentLength; u32 InitiatorRecvDataSegmentLength; u32 TargetRecvDataSegmentLength; } ; 259 struct iscsi_sess_ops { char InitiatorName[224U]; char InitiatorAlias[256U]; char TargetName[224U]; char TargetAlias[256U]; char TargetAddress[256U]; u16 TargetPortalGroupTag; u16 MaxConnections; u8 InitialR2T; u8 ImmediateData; u32 MaxBurstLength; u32 FirstBurstLength; u16 DefaultTime2Wait; u16 DefaultTime2Retain; u16 MaxOutstandingR2T; u8 DataPDUInOrder; u8 DataSequenceInOrder; u8 ErrorRecoveryLevel; u8 SessionType; u8 RDMAExtensions; } ; 284 struct iscsi_cmd ; 300 struct iscsi_param_list { bool iser; struct list_head param_list; struct list_head extra_response_list; } ; 306 struct iscsi_datain_req { enum datain_req_comp_table dr_complete; int generate_recovery_values; enum datain_req_rec_table recovery; u32 begrun; u32 runlength; u32 data_length; u32 data_offset; u32 data_sn; u32 next_burst_len; u32 read_data_done; u32 seq_send_order; struct list_head cmd_datain_node; } ; 337 struct iscsi_r2t { int seq_complete; int recovery_r2t; int sent_r2t; u32 r2t_sn; u32 offset; u32 targ_xfer_tag; u32 xfer_len; struct list_head r2t_list; } ; 348 struct iscsi_pdu ; 348 struct iscsi_seq ; 348 struct iscsi_tmr_req ; 348 struct iscsi_conn ; 348 struct iscsi_conn_recovery ; 348 struct iscsi_session ; 348 struct iscsi_cmd { enum iscsi_timer_flags_table dataout_timer_flags; u8 dataout_timeout_retries; u8 error_recovery_count; enum cmd_i_state_table deferred_i_state; enum cmd_i_state_table i_state; u8 immediate_cmd; u8 immediate_data; u8 iscsi_opcode; u8 iscsi_response; u8 logout_reason; u8 logout_response; u8 maxcmdsn_inc; u8 unsolicited_data; u8 reject_reason; u16 logout_cid; enum cmd_flags_table cmd_flags; itt_t init_task_tag; u32 targ_xfer_tag; u32 cmd_sn; u32 exp_stat_sn; u32 stat_sn; u32 data_sn; u32 r2t_sn; u32 acked_data_sn; u32 buf_ptr_size; u32 data_crc; u32 outstanding_r2ts; u32 
r2t_offset; u32 iov_data_count; u32 orig_iov_data_count; u32 iov_misc_count; u32 pdu_count; u32 pdu_send_order; u32 pdu_start; u32 seq_send_order; u32 seq_count; u32 seq_no; u32 seq_start_offset; u32 seq_end_offset; u32 read_data_done; u32 write_data_done; u32 first_burst_len; u32 next_burst_len; u32 tx_size; void *buf_ptr; void *text_in_ptr; enum dma_data_direction data_direction; unsigned char pdu[52U]; atomic_t immed_queue_count; atomic_t response_queue_count; spinlock_t datain_lock; spinlock_t dataout_timeout_lock; spinlock_t istate_lock; spinlock_t error_lock; spinlock_t r2t_lock; struct list_head datain_list; struct list_head cmd_r2t_list; struct timer_list dataout_timer; struct kvec *iov_data; struct kvec iov_misc[5U]; struct iscsi_pdu *pdu_list; struct iscsi_pdu *pdu_ptr; struct iscsi_seq *seq_list; struct iscsi_seq *seq_ptr; struct iscsi_tmr_req *tmr_req; struct iscsi_conn *conn; struct iscsi_conn_recovery *cr; struct iscsi_session *sess; struct list_head i_conn_node; struct se_cmd se_cmd; unsigned char sense_buffer[98U]; u32 padding; u8 pad_bytes[4U]; struct scatterlist *first_data_sg; u32 first_data_sg_off; u32 kmapped_nents; sense_reason_t sense_reason; } ; 498 struct iscsi_tmr_req { bool task_reassign; u32 exp_data_sn; struct iscsi_cmd *ref_cmd; struct iscsi_conn_recovery *conn_recovery; struct se_tmr_req *se_tmr_req; } ; 506 struct iscsi_login ; 506 struct iscsit_transport ; 506 struct iscsi_login_thread_s ; 506 struct iscsi_portal_group ; 506 struct iscsi_tpg_np ; 506 struct iscsi_conn { wait_queue_head_t queues_wq; u8 auth_complete; u8 conn_state; u8 conn_logout_reason; u8 network_transport; enum iscsi_timer_flags_table nopin_timer_flags; enum iscsi_timer_flags_table nopin_response_timer_flags; u8 which_thread; u16 cid; u16 login_port; int net_size; int login_family; u32 auth_id; u32 conn_flags; itt_t login_itt; u32 exp_statsn; u32 stat_sn; struct __kernel_sockaddr_storage login_sockaddr; struct __kernel_sockaddr_storage local_sockaddr; int 
conn_usage_count; int conn_waiting_on_uc; atomic_t check_immediate_queue; atomic_t conn_logout_remove; atomic_t connection_exit; atomic_t connection_recovery; atomic_t connection_reinstatement; atomic_t connection_wait_rcfr; atomic_t sleep_on_conn_wait_comp; atomic_t transport_failed; struct completion conn_post_wait_comp; struct completion conn_wait_comp; struct completion conn_wait_rcfr_comp; struct completion conn_waiting_on_uc_comp; struct completion conn_logout_comp; struct completion tx_half_close_comp; struct completion rx_half_close_comp; struct socket *sock; void (*orig_data_ready)(struct sock *); void (*orig_state_change)(struct sock *); unsigned long login_flags; struct delayed_work login_work; struct delayed_work login_cleanup_work; struct iscsi_login *login; struct timer_list nopin_timer; struct timer_list nopin_response_timer; struct timer_list transport_timer; struct task_struct *login_kworker; spinlock_t cmd_lock; spinlock_t conn_usage_lock; spinlock_t immed_queue_lock; spinlock_t nopin_timer_lock; spinlock_t response_queue_lock; spinlock_t state_lock; struct ahash_request *conn_rx_hash; struct ahash_request *conn_tx_hash; cpumask_var_t conn_cpumask; unsigned char conn_rx_reset_cpumask; unsigned char conn_tx_reset_cpumask; struct list_head conn_cmd_list; struct list_head immed_queue_list; struct list_head response_queue_list; struct iscsi_conn_ops *conn_ops; struct iscsi_login *conn_login; struct iscsit_transport *conn_transport; struct iscsi_param_list *param_list; void *auth_protocol; void *context; struct iscsi_login_thread_s *login_thread; struct iscsi_portal_group *tpg; struct iscsi_tpg_np *tpg_np; struct iscsi_session *sess; int bitmap_id; int rx_thread_active; struct task_struct *rx_thread; struct completion rx_login_comp; int tx_thread_active; struct task_struct *tx_thread; struct list_head conn_list; } ; 605 struct iscsi_conn_recovery { u16 cid; u32 cmd_count; u32 maxrecvdatasegmentlength; u32 maxxmitdatasegmentlength; int 
ready_for_reallegiance; struct list_head conn_recovery_cmd_list; spinlock_t conn_recovery_cmd_lock; struct timer_list time2retain_timer; struct iscsi_session *sess; struct list_head cr_list; } ; 618 struct iscsi_session { u8 initiator_vendor; u8 isid[6U]; enum iscsi_timer_flags_table time2retain_timer_flags; u8 version_active; u16 cid_called; u16 conn_recovery_count; u16 tsih; u32 session_state; itt_t init_task_tag; u32 targ_xfer_tag; u32 cmdsn_window; struct mutex cmdsn_mutex; u32 exp_cmd_sn; atomic_t max_cmd_sn; struct list_head sess_ooo_cmdsn_list; u32 sid; char auth_type[8U]; int session_index; int session_usage_count; int session_waiting_on_uc; atomic_long_t cmd_pdus; atomic_long_t rsp_pdus; atomic_long_t tx_data_octets; atomic_long_t rx_data_octets; atomic_long_t conn_digest_errors; atomic_long_t conn_timeout_errors; u64 creation_time; atomic_t nconn; atomic_t session_continuation; atomic_t session_fall_back_to_erl0; atomic_t session_logout; atomic_t session_reinstatement; atomic_t session_stop_active; atomic_t sleep_on_sess_wait_comp; struct list_head sess_conn_list; struct list_head cr_active_list; struct list_head cr_inactive_list; spinlock_t conn_lock; spinlock_t cr_a_lock; spinlock_t cr_i_lock; spinlock_t session_usage_lock; spinlock_t ttt_lock; struct completion async_msg_comp; struct completion reinstatement_comp; struct completion session_wait_comp; struct completion session_waiting_on_uc_comp; struct timer_list time2retain_timer; struct iscsi_sess_ops *sess_ops; struct se_session *se_sess; struct iscsi_portal_group *tpg; } ; 684 struct iscsi_np ; 684 struct iscsi_login { u8 auth_complete; u8 checked_for_existing; u8 current_stage; u8 leading_connection; u8 first_request; u8 version_min; u8 version_max; u8 login_complete; u8 login_failed; bool zero_tsih; char isid[6U]; u32 cmd_sn; itt_t init_task_tag; u32 initial_exp_statsn; u32 rsp_length; u16 cid; u16 tsih; char req[48U]; char rsp[48U]; char *req_buf; char *rsp_buf; struct iscsi_conn *conn; struct 
iscsi_np *np; } ; 725 struct iscsi_node_auth { enum naf_flags_table naf_flags; int authenticate_target; int enforce_discovery_auth; char userid[256U]; char password[256U]; char userid_mutual[256U]; char password_mutual[256U]; } ; 17 struct iscsi_sess_err_stats { spinlock_t lock; u32 digest_errors; u32 cxn_timeout_errors; u32 pdu_format_errors; u32 last_sess_failure_type; char last_sess_fail_rem_name[224U]; } ; 33 struct iscsi_login_stats { spinlock_t lock; u32 accepts; u32 other_fails; u32 redirects; u32 authorize_fails; u32 authenticate_fails; u32 negotiate_fails; u64 last_fail_time; u32 last_fail_type; int last_intr_fail_ip_family; struct __kernel_sockaddr_storage last_intr_fail_sockaddr; char last_intr_fail_name[224U]; } ; 56 struct iscsi_logout_stats { spinlock_t lock; u32 normal_logouts; u32 abnormal_logouts; } ; 754 struct iscsi_tpg_attrib { u32 authentication; u32 login_timeout; u32 netif_timeout; u32 generate_node_acls; u32 cache_dynamic_acls; u32 default_cmdsn_depth; u32 demo_mode_write_protect; u32 prod_mode_write_protect; u32 demo_mode_discovery; u32 default_erl; u8 t10_pi; u32 fabric_prot_type; u32 tpg_enabled_sendtargets; struct iscsi_portal_group *tpg; } ; 771 struct iscsi_np { int np_network_transport; int np_ip_proto; int np_sock_type; enum np_thread_state_table np_thread_state; bool enabled; enum iscsi_timer_flags_table np_login_timer_flags; u32 np_exports; enum np_flags_table np_flags; spinlock_t np_thread_lock; struct completion np_restart_comp; struct socket *np_socket; struct __kernel_sockaddr_storage np_sockaddr; struct task_struct *np_thread; struct timer_list np_login_timer; void *np_context; struct iscsit_transport *np_transport; struct list_head np_list; } ; 791 struct iscsi_tpg_np { struct iscsi_np *tpg_np; struct iscsi_portal_group *tpg; struct iscsi_tpg_np *tpg_np_parent; struct list_head tpg_np_list; struct list_head tpg_np_child_list; struct list_head tpg_np_parent_list; struct se_tpg_np se_tpg_np; spinlock_t tpg_np_parent_lock; 
struct completion tpg_np_comp; struct kref tpg_np_kref; } ; 804 struct iscsi_tiqn ; 804 struct iscsi_portal_group { unsigned char tpg_chap_id; enum tpg_state_table tpg_state; u16 tpgt; u16 ntsih; u32 nsessions; u32 num_tpg_nps; u32 sid; spinlock_t tpg_np_lock; spinlock_t tpg_state_lock; struct se_portal_group tpg_se_tpg; struct mutex tpg_access_lock; struct semaphore np_login_sem; struct iscsi_tpg_attrib tpg_attrib; struct iscsi_node_auth tpg_demo_auth; struct iscsi_param_list *param_list; struct iscsi_tiqn *tpg_tiqn; struct list_head tpg_gnp_list; struct list_head tpg_list; } ; 833 struct iscsi_wwn_stat_grps { struct config_group iscsi_stat_group; struct config_group iscsi_instance_group; struct config_group iscsi_sess_err_group; struct config_group iscsi_tgt_attr_group; struct config_group iscsi_login_stats_group; struct config_group iscsi_logout_stats_group; } ; 842 struct iscsi_tiqn { unsigned char tiqn[224U]; enum tiqn_state_table tiqn_state; int tiqn_access_count; u32 tiqn_active_tpgs; u32 tiqn_ntpgs; u32 tiqn_num_tpg_nps; u32 tiqn_nsessions; struct list_head tiqn_list; struct list_head tiqn_tpg_list; spinlock_t tiqn_state_lock; spinlock_t tiqn_tpg_lock; struct se_wwn tiqn_wwn; struct iscsi_wwn_stat_grps tiqn_stat_grps; int tiqn_index; struct iscsi_sess_err_stats sess_err_stats; struct iscsi_login_stats login_stats; struct iscsi_logout_stats logout_stats; } ; 920 struct iscsit_transport { char name[16U]; int transport_type; bool rdma_shutdown; int priv_size; struct module *owner; struct list_head t_node; int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *); int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *); void (*iscsit_free_np)(struct iscsi_np *); void (*iscsit_wait_conn)(struct iscsi_conn *); void (*iscsit_free_conn)(struct iscsi_conn *); int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *); int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32 ); int 
(*iscsit_immediate_queue)(struct iscsi_conn *, struct iscsi_cmd *, int); int (*iscsit_response_queue)(struct iscsi_conn *, struct iscsi_cmd *, int); int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool ); int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *); int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *); void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *); int (*iscsit_xmit_pdu)(struct iscsi_conn *, struct iscsi_cmd *, struct iscsi_datain_req *, const void *, u32 ); void (*iscsit_release_cmd)(struct iscsi_conn *, struct iscsi_cmd *); void (*iscsit_get_rx_pdu)(struct iscsi_conn *); int (*iscsit_validate_params)(struct iscsi_conn *); void (*iscsit_get_r2t_ttt)(struct iscsi_conn *, struct iscsi_cmd *, struct iscsi_r2t *); enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *); } ; 24 struct rdma_rw_reg_ctx { struct ib_sge sge; struct ib_rdma_wr wr; struct ib_reg_wr reg_wr; struct ib_send_wr inv_wr; struct ib_mr *mr; } ; 50 struct __anonstruct_single_641 { struct ib_sge sge; struct ib_rdma_wr wr; } ; 50 struct __anonstruct_map_642 { struct ib_sge *sges; struct ib_rdma_wr *wrs; } ; 50 struct __anonstruct_sig_643 { struct rdma_rw_reg_ctx data; struct rdma_rw_reg_ctx prot; struct ib_send_wr sig_inv_wr; struct ib_mr *sig_mr; struct ib_sge sig_sge; struct ib_sig_handover_wr sig_wr; } ; 50 union __anonunion____missing_field_name_640 { struct __anonstruct_single_641 single; struct __anonstruct_map_642 map; struct rdma_rw_reg_ctx *reg; struct __anonstruct_sig_643 *sig; } ; 50 struct rdma_rw_ctx { u32 nr_ops; u8 type; union __anonunion____missing_field_name_640 __annonCompField154; } ; 87 struct iser_cm_hdr { u8 flags; u8 rsvd[3U]; } ; 58 struct iser_ctrl { u8 flags; u8 rsvd[3U]; __be32 write_stag; __be64 write_va; __be32 read_stag; __be64 read_va; } ; 77 enum isert_desc_type { ISCSI_TX_CONTROL = 0, ISCSI_TX_DATAIN = 1 } ; 82 enum iser_conn_state { ISER_CONN_INIT = 0, ISER_CONN_UP = 1, 
ISER_CONN_BOUND = 2, ISER_CONN_FULL_FEATURE = 3, ISER_CONN_TERMINATING = 4, ISER_CONN_DOWN = 5 } ; 91 struct iser_rx_desc { struct iser_ctrl iser_header; struct iscsi_hdr iscsi_header; char data[8192U]; u64 dma_addr; struct ib_sge rx_sg; struct ib_cqe rx_cqe; char pad[3988U]; } ; 96 struct iser_tx_desc { struct iser_ctrl iser_header; struct iscsi_hdr iscsi_header; enum isert_desc_type type; u64 dma_addr; struct ib_sge tx_sg[2U]; struct ib_cqe tx_cqe; int num_sge; struct ib_send_wr send_wr; } ; 112 struct isert_conn ; 112 struct isert_cmd { uint32_t read_stag; uint32_t write_stag; uint64_t read_va; uint64_t write_va; uint32_t inv_rkey; u64 pdu_buf_dma; u32 pdu_buf_len; struct isert_conn *conn; struct iscsi_cmd *iscsi_cmd; struct iser_tx_desc tx_desc; struct iser_rx_desc *rx_desc; struct rdma_rw_ctx rw; struct work_struct comp_work; struct scatterlist sg; } ; 134 struct isert_device ; 135 struct isert_conn { enum iser_conn_state state; u32 responder_resources; u32 initiator_depth; bool pi_support; struct iser_rx_desc *login_req_buf; char *login_rsp_buf; u64 login_req_dma; int login_req_len; u64 login_rsp_dma; struct iser_rx_desc *rx_descs; struct ib_recv_wr rx_wr[128U]; struct iscsi_conn *conn; struct list_head node; struct completion login_comp; struct completion login_req_comp; struct iser_tx_desc login_tx_desc; struct rdma_cm_id *cm_id; struct ib_qp *qp; struct isert_device *device; struct mutex mutex; struct kref kref; struct work_struct release_work; bool logout_posted; bool snd_w_inv; } ; 162 struct isert_comp { struct isert_device *device; struct ib_cq *cq; int active_qps; } ; 178 struct isert_device { bool pi_capable; int refcount; struct ib_device *ib_device; struct ib_pd *pd; struct isert_comp *comps; int comps_used; struct list_head dev_node; } ; 188 struct isert_np { struct iscsi_np *np; struct semaphore sem; struct rdma_cm_id *cm_id; struct mutex mutex; struct list_head accepted; struct list_head pending; } ; 1 void * __builtin_memcpy(void *, const void 
*, unsigned long); 1 long int __builtin_expect(long, long); 218 void __read_once_size(const volatile void *p, void *res, int size); 243 void __write_once_size(volatile void *p, void *res, int size); 33 extern struct module __this_module; 7 __u32 __arch_swab32(__u32 val); 14 __u64 __arch_swab64(__u64 val); 46 __u16 __fswab16(__u16 val); 55 __u32 __fswab32(__u32 val); 64 __u64 __fswab64(__u64 val); 162 int printk(const char *, ...); 257 void dump_stack(); 275 void __pr_err(const char *, ...); 276 void __pr_warn(const char *, ...); 278 void __pr_info(const char *, ...); 3 bool ldv_is_err(const void *ptr); 5 void * ldv_err_ptr(long error); 6 long int ldv_ptr_err(const void *ptr); 8 void ldv_dma_map_page(); 9 void ldv_dma_mapping_error(); 31 void * __memcpy(void *, const void *, size_t ); 56 void * __memset(void *, int, size_t ); 25 void INIT_LIST_HEAD(struct list_head *list); 48 void __list_add(struct list_head *, struct list_head *, struct list_head *); 75 void list_add_tail(struct list_head *new, struct list_head *head); 112 void __list_del_entry(struct list_head *); 113 void list_del(struct list_head *); 143 void list_del_init(struct list_head *entry); 165 void list_move_tail(struct list_head *list, struct list_head *head); 187 int list_empty(const struct list_head *head); 87 void __bad_percpu_size(); 71 void warn_slowpath_null(const char *, const int); 23 unsigned long int __phys_addr(unsigned long); 70 bool __virt_addr_valid(unsigned long); 10 extern struct task_struct *current_task; 12 struct task_struct * get_current(); 114 int __bitmap_weight(const unsigned long *, unsigned int); 311 int bitmap_weight(const unsigned long *src, unsigned int nbits); 37 extern int nr_cpu_ids; 88 extern struct cpumask __cpu_possible_mask; 89 extern struct cpumask __cpu_online_mask; 478 unsigned int cpumask_weight(const struct cpumask *srcp); 16 void __xadd_wrong_size(); 36 void atomic_set(atomic_t *v, int i); 78 bool atomic_sub_and_test(int i, atomic_t *v); 154 int 
atomic_add_return(int i, atomic_t *v); 23 void * ERR_PTR(long error); 32 long int PTR_ERR(const void *ptr); 41 bool IS_ERR(const void *ptr); 281 void lockdep_init_map(struct lockdep_map *, const char *, struct lock_class_key *, int); 30 void _raw_spin_lock_bh(raw_spinlock_t *); 42 void _raw_spin_unlock_bh(raw_spinlock_t *); 305 void spin_lock_bh(spinlock_t *lock); 350 void spin_unlock_bh(spinlock_t *lock); 72 void __init_waitqueue_head(wait_queue_head_t *, const char *, struct lock_class_key *); 119 void __mutex_init(struct mutex *, const char *, struct lock_class_key *); 138 void mutex_lock_nested(struct mutex *, unsigned int); 174 void mutex_unlock(struct mutex *); 73 void init_completion(struct completion *x); 86 void reinit_completion(struct completion *x); 93 int wait_for_completion_interruptible(struct completion *); 95 unsigned long int wait_for_completion_timeout(struct completion *, unsigned long); 106 void complete(struct completion *); 181 void __init_work(struct work_struct *, int); 353 extern struct workqueue_struct *system_wq; 362 struct workqueue_struct * __alloc_workqueue_key(const char *, unsigned int, int, struct lock_class_key *, const char *, ...); 422 void destroy_workqueue(struct workqueue_struct *); 430 bool queue_work_on(int, struct workqueue_struct *, struct work_struct *); 432 bool queue_delayed_work_on(int, struct workqueue_struct *, struct delayed_work *, unsigned long); 437 void flush_workqueue(struct workqueue_struct *); 470 bool queue_work(struct workqueue_struct *wq, struct work_struct *work); 484 bool queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay); 558 void flush_scheduled_work(); 586 bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); 31 void kref_init(struct kref *kref); 40 void kref_get(struct kref *kref); 67 int kref_sub(struct kref *kref, unsigned int count, void (*release)(struct kref *)); 96 int kref_put(struct kref *kref, void (*release)(struct kref 
*)); 82 void sg_assign_page(struct scatterlist *sg, struct page *page); 112 void sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len, unsigned int offset); 136 void sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen); 248 void sg_init_table(struct scatterlist *, unsigned int); 272 size_t sg_copy_from_buffer(struct scatterlist *, unsigned int, const void *, size_t ); 32 void sema_init(struct semaphore *sem, int val); 40 int down_interruptible(struct semaphore *); 44 void up(struct semaphore *); 37 void debug_dma_map_page(struct device *, struct page *, size_t , size_t , int, dma_addr_t , bool ); 42 void debug_dma_mapping_error(struct device *, dma_addr_t ); 44 void debug_dma_unmap_page(struct device *, dma_addr_t , size_t , int, bool ); 59 void debug_dma_sync_single_for_cpu(struct device *, dma_addr_t , size_t , int); 63 void debug_dma_sync_single_for_device(struct device *, dma_addr_t , size_t , int); 131 void kmemcheck_mark_initialized(void *address, unsigned int n); 125 int valid_dma_direction(int dma_direction); 28 extern struct dma_map_ops *dma_ops; 30 struct dma_map_ops * get_dma_ops(struct device *dev); 169 dma_addr_t ldv_dma_map_single_attrs_5(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs); 169 dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs); 192 void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs); 269 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); 281 void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); 471 int ldv_dma_mapping_error_7(struct device *dev, dma_addr_t dma_addr); 471 int dma_mapping_error(struct device *dev, dma_addr_t dma_addr); 154 void kfree(const void *); 318 void * __kmalloc(size_t , 
gfp_t ); 466 void * kmalloc(size_t size, gfp_t flags); 564 void * kmalloc_array(size_t n, size_t size, gfp_t flags); 579 void * kcalloc(size_t n, size_t size, gfp_t flags); 622 void * kzalloc(size_t size, gfp_t flags); 7 extern struct net init_net; 568 const char * ib_event_msg(enum ib_event_type ); 836 const char * ib_wc_status_msg(enum ib_wc_status ); 2505 struct ib_pd * ib_alloc_pd(struct ib_device *); 2507 void ib_dealloc_pd(struct ib_pd *); 2678 int ib_destroy_qp(struct ib_qp *); 2712 int ib_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr, struct ib_send_wr **bad_send_wr); 2727 int ib_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr, struct ib_recv_wr **bad_recv_wr); 2734 struct ib_cq * ib_alloc_cq(struct ib_device *, void *, int, int, enum ib_poll_context ); 2736 void ib_free_cq(struct ib_cq *); 2876 int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr); 2890 u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction); 2906 void ib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction); 3054 void ib_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction dir); 3072 void ib_dma_sync_single_for_device(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction dir); 3262 int ib_check_mr_status(struct ib_mr *, u32 , struct ib_mr_status *); 3298 void ib_drain_qp(struct ib_qp *); 52 void put_unaligned_be16(u16 val, void *p); 65 const char * rdma_event_msg(enum rdma_cm_event_type ); 172 struct rdma_cm_id * rdma_create_id(struct net *, int (*)(struct rdma_cm_id *, struct rdma_cm_event *), void *, enum rdma_port_space , enum ib_qp_type ); 185 void rdma_destroy_id(struct rdma_cm_id *); 198 int rdma_bind_addr(struct rdma_cm_id *, struct sockaddr *); 230 int rdma_create_qp(struct rdma_cm_id *, struct ib_pd *, struct ib_qp_init_attr *); 285 int rdma_listen(struct rdma_cm_id *, int); 302 int rdma_accept(struct 
rdma_cm_id *, struct rdma_conn_param *); 317 int rdma_notify(struct rdma_cm_id *, enum ib_event_type ); 322 int rdma_reject(struct rdma_cm_id *, const void *, u8 ); 329 int rdma_disconnect(struct rdma_cm_id *); 147 void target_execute_cmd(struct se_cmd *); 149 int transport_generic_free_cmd(struct se_cmd *, int); 153 int transport_send_check_condition_and_sense(struct se_cmd *, sense_reason_t , int); 156 int target_put_sess_cmd(struct se_cmd *); 157 void target_sess_cmd_list_set_waiting(struct se_session *); 158 void target_wait_for_sess_cmds(struct se_session *); 195 enum dma_data_direction target_reverse_dma_direction(struct se_cmd *se_cmd); 893 struct iscsi_cmd * iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t ); 36 void * iscsit_priv_cmd(struct iscsi_cmd *cmd); 45 int iscsit_register_transport(struct iscsit_transport *); 46 void iscsit_unregister_transport(struct iscsit_transport *); 53 int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *, unsigned char *); 55 void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *); 56 int iscsit_process_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *, struct iscsi_scsi_req *); 58 int iscsit_check_dataout_hdr(struct iscsi_conn *, unsigned char *, struct iscsi_cmd **); 60 int iscsit_check_dataout_payload(struct iscsi_cmd *, struct iscsi_data *, bool ); 62 int iscsit_setup_nop_out(struct iscsi_conn *, struct iscsi_cmd *, struct iscsi_nopout *); 64 int iscsit_process_nop_out(struct iscsi_conn *, struct iscsi_cmd *, struct iscsi_nopout *); 66 int iscsit_handle_logout_cmd(struct iscsi_conn *, struct iscsi_cmd *, unsigned char *); 68 int iscsit_handle_task_mgt_cmd(struct iscsi_conn *, struct iscsi_cmd *, unsigned char *); 70 int iscsit_setup_text_cmd(struct iscsi_conn *, struct iscsi_cmd *, struct iscsi_text *); 72 int iscsit_process_text_cmd(struct iscsi_conn *, struct iscsi_cmd *, struct iscsi_text *); 74 void iscsit_build_rsp_pdu(struct iscsi_cmd *, struct iscsi_conn *, bool , struct iscsi_scsi_rsp *); 76 void 
iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *, struct iscsi_nopin *, bool ); 78 void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *, struct iscsi_tm_rsp *); 80 int iscsit_build_text_rsp(struct iscsi_cmd *, struct iscsi_conn *, struct iscsi_text_rsp *, enum iscsit_transport_type ); 83 void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *, struct iscsi_reject *); 85 int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *, struct iscsi_logout_rsp *); 87 int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *); 107 void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); 111 void iscsit_stop_dataout_timer(struct iscsi_cmd *); 116 int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *); 121 struct iscsi_cmd * iscsit_allocate_cmd(struct iscsi_conn *, int); 122 int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, unsigned char *, __be32 ); 124 void iscsit_release_cmd(struct iscsi_cmd *); 62 int rdma_rw_ctx_init(struct rdma_rw_ctx *, struct ib_qp *, u8 , struct scatterlist *, u32 , u32 , u64 , u32 , enum dma_data_direction ); 65 void rdma_rw_ctx_destroy(struct rdma_rw_ctx *, struct ib_qp *, u8 , struct scatterlist *, u32 , enum dma_data_direction ); 69 int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *, struct ib_qp *, u8 , struct scatterlist *, u32 , struct scatterlist *, u32 , struct ib_sig_attrs *, u64 , u32 , enum dma_data_direction ); 74 void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *, struct ib_qp *, u8 , struct scatterlist *, u32 , struct scatterlist *, u32 , enum dma_data_direction ); 81 int rdma_rw_ctx_post(struct rdma_rw_ctx *, struct ib_qp *, u8 , struct ib_cqe *, struct ib_send_wr *); 91 struct iser_rx_desc * cqe_to_rx_desc(struct ib_cqe *cqe); 107 struct iser_tx_desc * cqe_to_tx_desc(struct ib_cqe *cqe); 129 struct isert_cmd * tx_desc_to_cmd(struct iser_tx_desc *desc); 42 int isert_debug_level = 0; 46 struct mutex device_list_mutex = { { 1 
}, { { { { { 0 } }, 3735899821U, 4294967295U, (void *)-1, { 0, { 0, 0 }, "device_list_mutex.wait_lock", 0, 0UL } } } }, { &(device_list_mutex.wait_list), &(device_list_mutex.wait_list) }, 0, (void *)(&device_list_mutex), { 0, { 0, 0 }, "device_list_mutex", 0, 0UL } }; 47 struct list_head device_list = { &device_list, &device_list }; 48 struct workqueue_struct *isert_comp_wq = 0; 49 struct workqueue_struct *isert_release_wq = 0; 52 int isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd); 54 int isert_login_post_recv(struct isert_conn *isert_conn); 56 int isert_rdma_accept(struct isert_conn *isert_conn); 57 struct rdma_cm_id * isert_setup_id(struct isert_np *isert_np); 59 void isert_release_work(struct work_struct *work); 60 void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc); 61 void isert_send_done(struct ib_cq *cq, struct ib_wc *wc); 62 void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc); 63 void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc); 66 bool isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd); 74 void isert_qp_event_callback(struct ib_event *e, void *context); 94 struct isert_comp * isert_comp_get(struct isert_conn *isert_conn); 116 void isert_comp_put(struct isert_comp *comp); 124 struct ib_qp * isert_create_qp(struct isert_conn *isert_conn, struct isert_comp *comp, struct rdma_cm_id *cma_id); 157 int isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id); 176 int isert_alloc_rx_descriptors(struct isert_conn *isert_conn); 224 void isert_free_rx_descriptors(struct isert_conn *isert_conn); 244 void isert_free_comps(struct isert_device *device); 258 int isert_alloc_comps(struct isert_device *device); 301 int isert_create_device_ib_res(struct isert_device *device); 336 void isert_free_device_ib_res(struct isert_device *device); 345 void isert_device_put(struct isert_device *device); 359 struct isert_device * isert_device_get(struct rdma_cm_id *cma_id); 401 void isert_init_conn(struct 
isert_conn *isert_conn); 413 void isert_free_login_buf(struct isert_conn *isert_conn); 428 int isert_alloc_login_buf(struct isert_conn *isert_conn, struct ib_device *ib_dev); 479 void isert_set_nego_params(struct isert_conn *isert_conn, struct rdma_conn_param *param); 505 int isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event); 573 void isert_connect_release(struct isert_conn *isert_conn); 601 void isert_connected_handler(struct rdma_cm_id *cma_id); 622 void isert_release_kref(struct kref *kref); 634 void isert_put_conn(struct isert_conn *isert_conn); 640 void isert_handle_unbound_conn(struct isert_conn *isert_conn); 670 void isert_conn_terminate(struct isert_conn *isert_conn); 687 int isert_np_cma_handler(struct isert_np *isert_np, enum rdma_cm_event_type event); 714 int isert_disconnected_handler(struct rdma_cm_id *cma_id, enum rdma_cm_event_type event); 742 int isert_connect_error(struct rdma_cm_id *cma_id); 754 int isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event); 795 int isert_post_recvm(struct isert_conn *isert_conn, u32 count); 821 int isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc); 839 int isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc); 865 void isert_create_send_desc(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, struct iser_tx_desc *tx_desc); 887 int isert_init_tx_hdrs(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc); 914 void isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr); 964 int isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, u32 length); 1027 void isert_rx_login_req(struct isert_conn *isert_conn); 1076 struct iscsi_cmd * isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc); 1096 int isert_handle_scsi_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd, struct iser_rx_desc 
*rx_desc, unsigned char *buf); 1164 int isert_handle_iscsi_dataout(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, unsigned char *buf); 1229 int isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, unsigned char *buf); 1248 int isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc, struct iscsi_text *hdr); 1277 int isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, uint32_t read_stag, uint64_t read_va, uint32_t write_stag, uint64_t write_va); 1362 void isert_print_wc(struct ib_wc *wc, const char *type); 1462 void isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn); 1484 void isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err); 1561 void isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev); 1572 void isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, struct ib_device *ib_dev, bool comp_err); 1587 int isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr); 1631 void isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc); 1660 void isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc); 1703 void isert_do_control_comp(struct work_struct *work); 1784 int isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd); 1854 void isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd); 1870 enum target_prot_op isert_get_sup_prot_ops(struct iscsi_conn *conn); 1890 int isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, bool nopout_response); 1910 int isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn); 1928 int isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn); 1946 int isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn); 1979 int isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn); 
2020 void isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs, struct ib_sig_domain *domain); 2041 int isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs); 2074 int isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn, struct ib_cqe *cqe, struct ib_send_wr *chain_wr); 2123 int isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd); 2165 int isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery); 2182 int isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state); 2207 int isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state); 2286 int isert_setup_np(struct iscsi_np *np, struct __kernel_sockaddr_storage *ksockaddr); 2358 int isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login); 2394 void isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, struct isert_conn *isert_conn); 2407 int isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn); 2453 void isert_free_np(struct iscsi_np *np); 2513 void isert_wait4logout(struct isert_conn *isert_conn); 2527 void isert_wait4cmds(struct iscsi_conn *conn); 2547 void isert_put_unsol_pending_cmds(struct iscsi_conn *conn); 2572 void isert_wait_conn(struct iscsi_conn *conn); 2590 void isert_free_conn(struct iscsi_conn *conn); 2598 void isert_get_rx_pdu(struct iscsi_conn *conn); 2607 struct iscsit_transport iser_target_transport = { { 'I', 'B', '/', 'i', 'S', 'E', 'R', '\x0' }, 5, 1, 448, &__this_module, { 0, 0 }, &isert_setup_np, &isert_accept_np, &isert_free_np, &isert_wait_conn, &isert_free_conn, &isert_get_login_rx, &isert_put_login_tx, &isert_immediate_queue, &isert_response_queue, &isert_get_dataout, &isert_put_datain, &isert_put_response, &isert_aborted_task, 0, 0, &isert_get_rx_pdu, 0, 0, &isert_get_sup_prot_ops }; 2630 int isert_init(); 2661 void isert_exit(); 2694 void ldv_check_final_state(); 2703 void ldv_initialize(); 2706 void 
ldv_handler_precall(); 2709 int nondet_int(); 2712 int LDV_IN_INTERRUPT = 0; 2715 void ldv_main0_sequence_infinite_withcheck_stateful(); 10 void ldv_error(); 28 bool ldv_is_err_or_null(const void *ptr); 5 int LDV_DMA_MAP_CALLS = 0; return ; } { 2717 struct iscsi_np *var_group1; 2718 struct __kernel_sockaddr_storage *var_group2; 2719 struct iscsi_conn *var_group3; 2720 struct iscsi_login *var_group4; 2721 unsigned int var_isert_put_login_tx_36_p2; 2722 struct iscsi_cmd *var_group5; 2723 int var_isert_immediate_queue_71_p2; 2724 int var_isert_response_queue_72_p2; 2725 _Bool var_isert_get_dataout_70_p2; 2726 int tmp; 2727 int tmp___0; 2728 int tmp___1; 2882 LDV_IN_INTERRUPT = 1; 2891 ldv_initialize() { /* Function call is skipped due to function is undefined */} 2904 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { 2632 int ret; 2633 struct lock_class_key __key; 2634 const char *__lock_name; 2635 struct workqueue_struct *tmp; 2636 struct lock_class_key __key___0; 2637 const char *__lock_name___0; 2638 int __max1; 2639 int __max2; 2640 unsigned int tmp___0; 2641 struct workqueue_struct *tmp___1; 2642 long tmp___2; 2634 __lock_name = "\"isert_comp_wq\""; 2634 tmp = __alloc_workqueue_key("isert_comp_wq", 18U, 0, &__key, __lock_name) { /* Function call is skipped due to function is undefined */} 2634 isert_comp_wq = tmp; 2642 __lock_name___0 = "\"isert_release_wq\""; 2642 __max1 = 512; { 480 int tmp; { 313 int tmp___0; 315 tmp___0 = __bitmap_weight(src, nbits) { /* Function call is skipped due to function is undefined */} 315 return tmp___0;; } 480 return (unsigned int)tmp;; } 2642 __max2 = (int)(tmp___0 * 4U); 2642 int __CPAchecker_TMP_0; 2642 __CPAchecker_TMP_0 = __max1; 2642 tmp___1 = __alloc_workqueue_key("isert_release_wq", 2U, __CPAchecker_TMP_0, &__key___0, __lock_name___0) { /* Function call is skipped due to function is undefined */} 2642 isert_release_wq = tmp___1; 2644 assume(!(((unsigned long)isert_release_wq) == 
((unsigned long)((struct workqueue_struct *)0)))); 2650 iscsit_register_transport(&iser_target_transport) { /* Function call is skipped due to function is undefined */} 2651 tmp___2 = __builtin_expect(isert_debug_level > 1, 0L) { /* Function call is skipped due to function is undefined */} } 2910 goto ldv_68937; 2910 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */} 2912 goto ldv_68936; 2911 ldv_68936:; 2913 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 2913 switch (tmp___0); 3194 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { 1807 struct isert_cmd *isert_cmd; 1808 void *tmp; 1809 struct isert_conn *isert_conn; 1810 struct ib_send_wr *send_wr; 1811 struct iscsi_scsi_rsp *hdr; 1812 struct isert_device *device; 1813 struct ib_device *ib_dev; 1814 struct ib_sge *tx_dsg; 1815 unsigned int padding; 1816 unsigned int pdu_len; 1817 long tmp___0; 1818 int tmp___1; { 38 return ((void *)cmd) + 1U;; } 1807 isert_cmd = (struct isert_cmd *)tmp; 1808 struct isert_conn *__CPAchecker_TMP_0 = (struct isert_conn *)(conn->context); 1808 isert_conn = __CPAchecker_TMP_0; 1809 send_wr = &(isert_cmd->tx_desc.send_wr); 1810 hdr = (struct iscsi_scsi_rsp *)(&(isert_cmd->tx_desc.iscsi_header)); { 868 struct isert_device *device; 869 struct ib_device *ib_dev; 870 long tmp; 869 device = isert_conn->device; 870 ib_dev = device->ib_device; { 3059 unsigned long __CPAchecker_TMP_0 = (unsigned long)(dev->dma_ops); 3059 assume(!(__CPAchecker_TMP_0 != ((unsigned long)((struct ib_dma_mapping_ops *)0)))); { 272 struct dma_map_ops *ops; 273 struct dma_map_ops *tmp; 274 int tmp___0; 275 long tmp___1; { 32 long tmp; 35 tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */} 35 assume(tmp != 0L); 36 return dma_ops;; } 273 ops = tmp; { 127 int __CPAchecker_TMP_0; 127 assume(!(dma_direction == 0)); 
127 assume(dma_direction == 1); __CPAchecker_TMP_0 = 1; 127 return __CPAchecker_TMP_0;; } 275 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */} 275 assume(!(tmp___1 != 0L)); 276 unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->sync_single_for_cpu); 276 assume(__CPAchecker_TMP_0 != ((unsigned long)((void (*)(struct device *, dma_addr_t , size_t , enum dma_data_direction ))0))); 277 (*(ops->sync_single_for_cpu))(dev, addr, size, dir); 278 debug_dma_sync_single_for_cpu(dev, addr, size, (int)dir) { /* Function call is skipped due to function is undefined */} 279 return ;; } 3063 return ;; } 875 __memset((void *)(&(tx_desc->iser_header)), 0, 28UL) { /* Function call is skipped due to function is undefined */} 876 tx_desc->iser_header.flags = 16U; 878 tx_desc->num_sge = 1; 881 ((tx_desc->tx_sg)[0]).lkey = device->pd->local_dma_lkey; 882 tmp = __builtin_expect(isert_debug_level > 2, 0L) { /* Function call is skipped due to function is undefined */} } 1814 iscsit_build_rsp_pdu(cmd, conn, 1, hdr) { /* Function call is skipped due to function is undefined */} { 889 struct isert_device *device; 890 struct ib_device *ib_dev; 891 unsigned long long dma_addr; 892 int tmp; 893 long tmp___0; 890 device = isert_conn->device; 891 ib_dev = device->ib_device; { 2893 unsigned long long tmp; 2894 unsigned long long tmp___0; 2894 unsigned long __CPAchecker_TMP_0 = (unsigned long)(dev->dma_ops); 2894 assume(!(__CPAchecker_TMP_0 != ((unsigned long)((struct ib_dma_mapping_ops *)0)))); { 38 unsigned long long tmp; { } 173 struct dma_map_ops *ops; 174 struct dma_map_ops *tmp; 175 unsigned long long addr; 176 int tmp___0; 177 long tmp___1; 178 unsigned long tmp___2; 179 unsigned long tmp___3; { 32 long tmp; 35 tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */} 35 assume(tmp != 0L); 36 return dma_ops;; } 174 ops = tmp; { 133 
return ;; } { 127 int __CPAchecker_TMP_0; 127 assume(!(dma_direction == 0)); 127 assume(dma_direction == 1); __CPAchecker_TMP_0 = 1; 127 return __CPAchecker_TMP_0;; } 178 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */} 178 assume(!(tmp___1 != 0L)); 179 tmp___2 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */} 179 addr = (*(ops->map_page))(dev, ((struct page *)-24189255811072L) + (tmp___2 >> 12), ((unsigned long)ptr) & 4095UL, size, dir, attrs); 182 tmp___3 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */} 182 debug_dma_map_page(dev, ((struct page *)-24189255811072L) + (tmp___3 >> 12), ((unsigned long)ptr) & 4095UL, size, (int)dir, addr, 1) { /* Function call is skipped due to function is undefined */} 185 return addr;; } 2896 return tmp___0;; } { 2878 int tmp; 2879 int tmp___0; 2878 unsigned long __CPAchecker_TMP_0 = (unsigned long)(dev->dma_ops); 2878 assume(!(__CPAchecker_TMP_0 != ((unsigned long)((struct ib_dma_mapping_ops *)0)))); { 53 int tmp; { } 473 struct dma_map_ops *tmp; 474 int tmp___0; 475 struct dma_map_ops *tmp___1; 473 debug_dma_mapping_error(dev, dma_addr) { /* Function call is skipped due to function is undefined */} { 32 long tmp; 35 tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */} 35 assume(tmp != 0L); 36 return dma_ops;; } 475 unsigned long __CPAchecker_TMP_0 = (unsigned long)(tmp___1->mapping_error); 475 assume(!(__CPAchecker_TMP_0 != ((unsigned long)((int (*)(struct device *, dma_addr_t ))0)))); 479 return dma_addr == 0ULL;; } 2880 return tmp___0;; } 897 __pr_err("isert: %s: ib_dma_mapping_error() failed\n", "isert_init_tx_hdrs") { /* Function call is skipped due to function is undefined */} } 1822 device = isert_conn->device; 1823 ib_dev = device->ib_device; 1824 tx_dsg = ((struct ib_sge 
*)(&(isert_cmd->tx_desc.tx_sg))) + 1UL; { 54 unsigned short tmp; { 51 return (__u16 )(((int)((short)(((int)val) << 8))) | ((int)((short)(((int)val) >> 8))));; } 54 *((__be16 *)p) = tmp; 55 return ;; } 1829 cmd->se_cmd.scsi_sense_length = ((unsigned int)(cmd->se_cmd.scsi_sense_length)) + 2U; 1831 padding = ((u32 )(-((int)(cmd->se_cmd.scsi_sense_length)))) & 3U; 1832 (hdr->dlength)[0] = 0U; 1832 (hdr->dlength)[1] = (uint8_t )(((int)(cmd->se_cmd.scsi_sense_length)) >> 8); 1832 (hdr->dlength)[2] = (uint8_t )(cmd->se_cmd.scsi_sense_length); 1833 pdu_len = ((u32 )(cmd->se_cmd.scsi_sense_length)) + padding; { 2893 unsigned long long tmp; 2894 unsigned long long tmp___0; 2894 unsigned long __CPAchecker_TMP_0 = (unsigned long)(dev->dma_ops); 2894 assume(!(__CPAchecker_TMP_0 != ((unsigned long)((struct ib_dma_mapping_ops *)0)))); { 38 unsigned long long tmp; { } 173 struct dma_map_ops *ops; 174 struct dma_map_ops *tmp; 175 unsigned long long addr; 176 int tmp___0; 177 long tmp___1; 178 unsigned long tmp___2; 179 unsigned long tmp___3; { 32 long tmp; 35 tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */} 35 assume(tmp != 0L); 36 return dma_ops;; } 174 ops = tmp; { 133 return ;; } { 127 int __CPAchecker_TMP_0; 127 assume(!(dma_direction == 0)); 127 assume(dma_direction == 1); __CPAchecker_TMP_0 = 1; 127 return __CPAchecker_TMP_0;; } 178 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */} 178 assume(!(tmp___1 != 0L)); 179 tmp___2 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */} 179 addr = (*(ops->map_page))(dev, ((struct page *)-24189255811072L) + (tmp___2 >> 12), ((unsigned long)ptr) & 4095UL, size, dir, attrs); 182 tmp___3 = __phys_addr((unsigned long)ptr) { /* Function call is skipped due to function is undefined */} 182 debug_dma_map_page(dev, ((struct page 
*)-24189255811072L) + (tmp___3 >> 12), ((unsigned long)ptr) & 4095UL, size, (int)dir, addr, 1) { /* Function call is skipped due to function is undefined */} 185 return addr;; } 2896 return tmp___0;; } 1839 isert_cmd->pdu_buf_len = pdu_len; 1840 tx_dsg->addr = isert_cmd->pdu_buf_dma; 1841 tx_dsg->length = pdu_len; 1842 tx_dsg->lkey = device->pd->local_dma_lkey; 1843 isert_cmd->tx_desc.num_sge = 2; { 917 struct iser_tx_desc *tx_desc; 917 tx_desc = &(isert_cmd->tx_desc); 919 tx_desc->tx_cqe.done = &isert_send_done; 920 send_wr->__annonCompField140.wr_cqe = &(tx_desc->tx_cqe); 922 int __CPAchecker_TMP_0 = (int)(isert_conn->snd_w_inv); 923 send_wr->opcode = 8; 924 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey; 929 send_wr->sg_list = (struct ib_sge *)(&(tx_desc->tx_sg)); 930 send_wr->num_sge = isert_cmd->tx_desc.num_sge; 931 send_wr->send_flags = 2; } 1848 tmp___0 = __builtin_expect(isert_debug_level > 2, 0L) { /* Function call is skipped due to function is undefined */} { } 1786 struct ib_send_wr *wr_failed; 1787 int ret; { 823 struct ib_recv_wr *rx_wr_failed; 824 struct ib_recv_wr rx_wr; 825 int ret; 826 rx_wr.__annonCompField141.wr_cqe = &(rx_desc->rx_cqe); 827 rx_wr.sg_list = &(rx_desc->rx_sg); 828 rx_wr.num_sge = 1; 829 rx_wr.next = (struct ib_recv_wr *)0; { } 2729 int tmp; 2731 tmp = (*(qp->device->post_recv))(qp, recv_wr, bad_recv_wr); 2731 return tmp;; } { 2714 int tmp; 2716 tmp = (*(qp->device->post_send))(qp, send_wr, bad_send_wr); 2716 return tmp;; } 1798 __pr_err("isert: %s: ib_post_send failed with %d\n", "isert_post_response", ret) { /* Function call is skipped due to function is undefined */} } 3202 goto ldv_68920; 3277 ldv_68920:; 3278 ldv_68937:; 2910 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */} 2912 goto ldv_68936; 2911 ldv_68936:; 2913 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */} 2913 switch (tmp___0); 3194 ldv_handler_precall() { /* Function call is skipped due to 
function is undefined */} { } 1807 struct isert_cmd *isert_cmd; 1808 void *tmp; 1809 struct isert_conn *isert_conn; 1810 struct ib_send_wr *send_wr; 1811 struct iscsi_scsi_rsp *hdr; 1812 struct isert_device *device; 1813 struct ib_device *ib_dev; 1814 struct ib_sge *tx_dsg; 1815 unsigned int padding; 1816 unsigned int pdu_len; 1817 long tmp___0; 1818 int tmp___1; { 38 return ((void *)cmd) + 1U;; } 1807 isert_cmd = (struct isert_cmd *)tmp; 1808 struct isert_conn *__CPAchecker_TMP_0 = (struct isert_conn *)(conn->context); 1808 isert_conn = __CPAchecker_TMP_0; 1809 send_wr = &(isert_cmd->tx_desc.send_wr); 1810 hdr = (struct iscsi_scsi_rsp *)(&(isert_cmd->tx_desc.iscsi_header)); { 868 struct isert_device *device; 869 struct ib_device *ib_dev; 870 long tmp; 869 device = isert_conn->device; 870 ib_dev = device->ib_device; { 3059 unsigned long __CPAchecker_TMP_0 = (unsigned long)(dev->dma_ops); 3059 assume(!(__CPAchecker_TMP_0 != ((unsigned long)((struct ib_dma_mapping_ops *)0)))); { 272 struct dma_map_ops *ops; 273 struct dma_map_ops *tmp; 274 int tmp___0; 275 long tmp___1; { 32 long tmp; 35 tmp = __builtin_expect(((unsigned long)dev) == ((unsigned long)((struct device *)0)), 0L) { /* Function call is skipped due to function is undefined */} 35 assume(tmp != 0L); 36 return dma_ops;; } 273 ops = tmp; { 127 int __CPAchecker_TMP_0; 127 assume(!(dma_direction == 0)); 127 assume(dma_direction == 1); __CPAchecker_TMP_0 = 1; 127 return __CPAchecker_TMP_0;; } 275 tmp___1 = __builtin_expect(tmp___0 == 0, 0L) { /* Function call is skipped due to function is undefined */} 275 assume(!(tmp___1 != 0L)); 276 unsigned long __CPAchecker_TMP_0 = (unsigned long)(ops->sync_single_for_cpu); 276 assume(__CPAchecker_TMP_0 != ((unsigned long)((void (*)(struct device *, dma_addr_t , size_t , enum dma_data_direction ))0))); 277 (*(ops->sync_single_for_cpu))(dev, addr, size, dir); 278 debug_dma_sync_single_for_cpu(dev, addr, size, (int)dir) { /* Function call is skipped due to function is 
undefined */} 279 return ;; } 3063 return ;; } 875 __memset((void *)(&(tx_desc->iser_header)), 0, 28UL) { /* Function call is skipped due to function is undefined */} 876 tx_desc->iser_header.flags = 16U; 878 tx_desc->num_sge = 1; 881 ((tx_desc->tx_sg)[0]).lkey = device->pd->local_dma_lkey; 882 tmp = __builtin_expect(isert_debug_level > 2, 0L) { /* Function call is skipped due to function is undefined */} } 1814 iscsit_build_rsp_pdu(cmd, conn, 1, hdr) { /* Function call is skipped due to function is undefined */} { } 889 struct isert_device *device; 890 struct ib_device *ib_dev; 891 unsigned long long dma_addr; 892 int tmp; 893 long tmp___0; 890 device = isert_conn->device; 891 ib_dev = device->ib_device; { } 2893 unsigned long long tmp; 2894 unsigned long long tmp___0; 2894 unsigned long __CPAchecker_TMP_0 = (unsigned long)(dev->dma_ops); 2894 assume(!(__CPAchecker_TMP_0 != ((unsigned long)((struct ib_dma_mapping_ops *)0)))); } | Source code
1 #ifndef _ASM_X86_DMA_MAPPING_H
2 #define _ASM_X86_DMA_MAPPING_H
3
4 /*
5 * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
6 * Documentation/DMA-API.txt for documentation.
7 */
8
9 #include <linux/kmemcheck.h>
10 #include <linux/scatterlist.h>
11 #include <linux/dma-debug.h>
12 #include <asm/io.h>
13 #include <asm/swiotlb.h>
14 #include <linux/dma-contiguous.h>
15
16 #ifdef CONFIG_ISA
17 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
18 #else
19 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
20 #endif
21
22 #define DMA_ERROR_CODE 0
23
24 extern int iommu_merge;
25 extern struct device x86_dma_fallback_dev;
26 extern int panic_on_overflow;
27
28 extern struct dma_map_ops *dma_ops;
29
30 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
31 {
32 #ifndef CONFIG_X86_DEV_DMA_OPS
33 return dma_ops;
34 #else
35 if (unlikely(!dev) || !dev->archdata.dma_ops)
36 return dma_ops;
37 else
38 return dev->archdata.dma_ops;
39 #endif
40 }
41
42 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
43 #define arch_dma_alloc_attrs arch_dma_alloc_attrs
44
45 #define HAVE_ARCH_DMA_SUPPORTED 1
46 extern int dma_supported(struct device *hwdev, u64 mask);
47
48 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
49 dma_addr_t *dma_addr, gfp_t flag,
50 unsigned long attrs);
51
52 extern void dma_generic_free_coherent(struct device *dev, size_t size,
53 void *vaddr, dma_addr_t dma_addr,
54 unsigned long attrs);
55
56 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
57 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
58 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
59 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
60 #else
61
62 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
63 {
64 if (!dev->dma_mask)
65 return 0;
66
67 return addr + size - 1 <= *dev->dma_mask;
68 }
69
70 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
71 {
72 return paddr;
73 }
74
75 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
76 {
77 return daddr;
78 }
79 #endif /* CONFIG_X86_DMA_REMAP */
80
81 static inline void
82 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
83 enum dma_data_direction dir)
84 {
85 flush_write_buffers();
86 }
87
88 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
89 gfp_t gfp)
90 {
91 unsigned long dma_mask = 0;
92
93 dma_mask = dev->coherent_dma_mask;
94 if (!dma_mask)
95 dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
96
97 return dma_mask;
98 }
99
100 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
101 {
102 unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
103
104 if (dma_mask <= DMA_BIT_MASK(24))
105 gfp |= GFP_DMA;
106 #ifdef CONFIG_X86_64
107 if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
108 gfp |= GFP_DMA32;
109 #endif
110 return gfp;
111 }
112
113 #endif 1
2 /*******************************************************************************
3 * This file contains iSCSI extensions for RDMA (iSER) Verbs
4 *
5 * (c) Copyright 2013 Datera, Inc.
6 *
7 * Nicholas A. Bellinger <nab@linux-iscsi.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 ****************************************************************************/
19
20 #include <linux/string.h>
21 #include <linux/module.h>
22 #include <linux/scatterlist.h>
23 #include <linux/socket.h>
24 #include <linux/in.h>
25 #include <linux/in6.h>
26 #include <rdma/ib_verbs.h>
27 #include <rdma/rdma_cm.h>
28 #include <target/target_core_base.h>
29 #include <target/target_core_fabric.h>
30 #include <target/iscsi/iscsi_transport.h>
31 #include <linux/semaphore.h>
32
33 #include "ib_isert.h"
34
35 #define ISERT_MAX_CONN 8
36 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
37 #define ISER_MAX_TX_CQ_LEN \
38 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
39 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
40 ISERT_MAX_CONN)
41
42 static int isert_debug_level;
43 module_param_named(debug_level, isert_debug_level, int, 0644);
44 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
45
46 static DEFINE_MUTEX(device_list_mutex);
47 static LIST_HEAD(device_list);
48 static struct workqueue_struct *isert_comp_wq;
49 static struct workqueue_struct *isert_release_wq;
50
51 static int
52 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
53 static int
54 isert_login_post_recv(struct isert_conn *isert_conn);
55 static int
56 isert_rdma_accept(struct isert_conn *isert_conn);
57 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
58
59 static void isert_release_work(struct work_struct *work);
60 static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
61 static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
62 static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
63 static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
64
65 static inline bool
66 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
67 {
68 return (conn->pi_support &&
69 cmd->prot_op != TARGET_PROT_NORMAL);
70 }
71
72
73 static void
74 isert_qp_event_callback(struct ib_event *e, void *context)
75 {
76 struct isert_conn *isert_conn = context;
77
78 isert_err("%s (%d): conn %p\n",
79 ib_event_msg(e->event), e->event, isert_conn);
80
81 switch (e->event) {
82 case IB_EVENT_COMM_EST:
83 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
84 break;
85 case IB_EVENT_QP_LAST_WQE_REACHED:
86 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
87 break;
88 default:
89 break;
90 }
91 }
92
93 static struct isert_comp *
94 isert_comp_get(struct isert_conn *isert_conn)
95 {
96 struct isert_device *device = isert_conn->device;
97 struct isert_comp *comp;
98 int i, min = 0;
99
100 mutex_lock(&device_list_mutex);
101 for (i = 0; i < device->comps_used; i++)
102 if (device->comps[i].active_qps <
103 device->comps[min].active_qps)
104 min = i;
105 comp = &device->comps[min];
106 comp->active_qps++;
107 mutex_unlock(&device_list_mutex);
108
109 isert_info("conn %p, using comp %p min_index: %d\n",
110 isert_conn, comp, min);
111
112 return comp;
113 }
114
115 static void
116 isert_comp_put(struct isert_comp *comp)
117 {
118 mutex_lock(&device_list_mutex);
119 comp->active_qps--;
120 mutex_unlock(&device_list_mutex);
121 }
122
123 static struct ib_qp *
124 isert_create_qp(struct isert_conn *isert_conn,
125 struct isert_comp *comp,
126 struct rdma_cm_id *cma_id)
127 {
128 struct isert_device *device = isert_conn->device;
129 struct ib_qp_init_attr attr;
130 int ret;
131
132 memset(&attr, 0, sizeof(struct ib_qp_init_attr));
133 attr.event_handler = isert_qp_event_callback;
134 attr.qp_context = isert_conn;
135 attr.send_cq = comp->cq;
136 attr.recv_cq = comp->cq;
137 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
138 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
139 attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
140 attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
141 attr.cap.max_recv_sge = 1;
142 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
143 attr.qp_type = IB_QPT_RC;
144 if (device->pi_capable)
145 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
146
147 ret = rdma_create_qp(cma_id, device->pd, &attr);
148 if (ret) {
149 isert_err("rdma_create_qp failed for cma_id %d\n", ret);
150 return ERR_PTR(ret);
151 }
152
153 return cma_id->qp;
154 }
155
156 static int
157 isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
158 {
159 struct isert_comp *comp;
160 int ret;
161
162 comp = isert_comp_get(isert_conn);
163 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
164 if (IS_ERR(isert_conn->qp)) {
165 ret = PTR_ERR(isert_conn->qp);
166 goto err;
167 }
168
169 return 0;
170 err:
171 isert_comp_put(comp);
172 return ret;
173 }
174
175 static int
176 isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
177 {
178 struct isert_device *device = isert_conn->device;
179 struct ib_device *ib_dev = device->ib_device;
180 struct iser_rx_desc *rx_desc;
181 struct ib_sge *rx_sg;
182 u64 dma_addr;
183 int i, j;
184
185 isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
186 sizeof(struct iser_rx_desc), GFP_KERNEL);
187 if (!isert_conn->rx_descs)
188 goto fail;
189
190 rx_desc = isert_conn->rx_descs;
191
192 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
193 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
194 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
195 if (ib_dma_mapping_error(ib_dev, dma_addr))
196 goto dma_map_fail;
197
198 rx_desc->dma_addr = dma_addr;
199
200 rx_sg = &rx_desc->rx_sg;
201 rx_sg->addr = rx_desc->dma_addr;
202 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
203 rx_sg->lkey = device->pd->local_dma_lkey;
204 rx_desc->rx_cqe.done = isert_recv_done;
205 }
206
207 return 0;
208
209 dma_map_fail:
210 rx_desc = isert_conn->rx_descs;
211 for (j = 0; j < i; j++, rx_desc++) {
212 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
213 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
214 }
215 kfree(isert_conn->rx_descs);
216 isert_conn->rx_descs = NULL;
217 fail:
218 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
219
220 return -ENOMEM;
221 }
222
223 static void
224 isert_free_rx_descriptors(struct isert_conn *isert_conn)
225 {
226 struct ib_device *ib_dev = isert_conn->device->ib_device;
227 struct iser_rx_desc *rx_desc;
228 int i;
229
230 if (!isert_conn->rx_descs)
231 return;
232
233 rx_desc = isert_conn->rx_descs;
234 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
235 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
236 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
237 }
238
239 kfree(isert_conn->rx_descs);
240 isert_conn->rx_descs = NULL;
241 }
242
243 static void
244 isert_free_comps(struct isert_device *device)
245 {
246 int i;
247
248 for (i = 0; i < device->comps_used; i++) {
249 struct isert_comp *comp = &device->comps[i];
250
251 if (comp->cq)
252 ib_free_cq(comp->cq);
253 }
254 kfree(device->comps);
255 }
256
257 static int
258 isert_alloc_comps(struct isert_device *device)
259 {
260 int i, max_cqe, ret = 0;
261
262 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
263 device->ib_device->num_comp_vectors));
264
265 isert_info("Using %d CQs, %s supports %d vectors support "
266 "pi_capable %d\n",
267 device->comps_used, device->ib_device->name,
268 device->ib_device->num_comp_vectors,
269 device->pi_capable);
270
271 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
272 GFP_KERNEL);
273 if (!device->comps) {
274 isert_err("Unable to allocate completion contexts\n");
275 return -ENOMEM;
276 }
277
278 max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);
279
280 for (i = 0; i < device->comps_used; i++) {
281 struct isert_comp *comp = &device->comps[i];
282
283 comp->device = device;
284 comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i,
285 IB_POLL_WORKQUEUE);
286 if (IS_ERR(comp->cq)) {
287 isert_err("Unable to allocate cq\n");
288 ret = PTR_ERR(comp->cq);
289 comp->cq = NULL;
290 goto out_cq;
291 }
292 }
293
294 return 0;
295 out_cq:
296 isert_free_comps(device);
297 return ret;
298 }
299
300 static int
301 isert_create_device_ib_res(struct isert_device *device)
302 {
303 struct ib_device *ib_dev = device->ib_device;
304 int ret;
305
306 isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge);
307 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
308
309 ret = isert_alloc_comps(device);
310 if (ret)
311 goto out;
312
313 device->pd = ib_alloc_pd(ib_dev);
314 if (IS_ERR(device->pd)) {
315 ret = PTR_ERR(device->pd);
316 isert_err("failed to allocate pd, device %p, ret=%d\n",
317 device, ret);
318 goto out_cq;
319 }
320
321 /* Check signature cap */
322 device->pi_capable = ib_dev->attrs.device_cap_flags &
323 IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
324
325 return 0;
326
327 out_cq:
328 isert_free_comps(device);
329 out:
330 if (ret > 0)
331 ret = -EINVAL;
332 return ret;
333 }
334
335 static void
336 isert_free_device_ib_res(struct isert_device *device)
337 {
338 isert_info("device %p\n", device);
339
340 ib_dealloc_pd(device->pd);
341 isert_free_comps(device);
342 }
343
344 static void
345 isert_device_put(struct isert_device *device)
346 {
347 mutex_lock(&device_list_mutex);
348 device->refcount--;
349 isert_info("device %p refcount %d\n", device, device->refcount);
350 if (!device->refcount) {
351 isert_free_device_ib_res(device);
352 list_del(&device->dev_node);
353 kfree(device);
354 }
355 mutex_unlock(&device_list_mutex);
356 }
357
358 static struct isert_device *
359 isert_device_get(struct rdma_cm_id *cma_id)
360 {
361 struct isert_device *device;
362 int ret;
363
364 mutex_lock(&device_list_mutex);
365 list_for_each_entry(device, &device_list, dev_node) {
366 if (device->ib_device->node_guid == cma_id->device->node_guid) {
367 device->refcount++;
368 isert_info("Found iser device %p refcount %d\n",
369 device, device->refcount);
370 mutex_unlock(&device_list_mutex);
371 return device;
372 }
373 }
374
375 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
376 if (!device) {
377 mutex_unlock(&device_list_mutex);
378 return ERR_PTR(-ENOMEM);
379 }
380
381 INIT_LIST_HEAD(&device->dev_node);
382
383 device->ib_device = cma_id->device;
384 ret = isert_create_device_ib_res(device);
385 if (ret) {
386 kfree(device);
387 mutex_unlock(&device_list_mutex);
388 return ERR_PTR(ret);
389 }
390
391 device->refcount++;
392 list_add_tail(&device->dev_node, &device_list);
393 isert_info("Created a new iser device %p refcount %d\n",
394 device, device->refcount);
395 mutex_unlock(&device_list_mutex);
396
397 return device;
398 }
399
400 static void
401 isert_init_conn(struct isert_conn *isert_conn)
402 {
403 isert_conn->state = ISER_CONN_INIT;
404 INIT_LIST_HEAD(&isert_conn->node);
405 init_completion(&isert_conn->login_comp);
406 init_completion(&isert_conn->login_req_comp);
407 kref_init(&isert_conn->kref);
408 mutex_init(&isert_conn->mutex);
409 INIT_WORK(&isert_conn->release_work, isert_release_work);
410 }
411
412 static void
413 isert_free_login_buf(struct isert_conn *isert_conn)
414 {
415 struct ib_device *ib_dev = isert_conn->device->ib_device;
416
417 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
418 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
419 kfree(isert_conn->login_rsp_buf);
420
421 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
422 ISER_RX_PAYLOAD_SIZE,
423 DMA_FROM_DEVICE);
424 kfree(isert_conn->login_req_buf);
425 }
426
427 static int
428 isert_alloc_login_buf(struct isert_conn *isert_conn,
429 struct ib_device *ib_dev)
430 {
431 int ret;
432
433 isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
434 GFP_KERNEL);
435 if (!isert_conn->login_req_buf) {
436 isert_err("Unable to allocate isert_conn->login_buf\n");
437 return -ENOMEM;
438 }
439
440 isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
441 isert_conn->login_req_buf,
442 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
443 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
444 if (ret) {
445 isert_err("login_req_dma mapping error: %d\n", ret);
446 isert_conn->login_req_dma = 0;
447 goto out_free_login_req_buf;
448 }
449
450 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
451 if (!isert_conn->login_rsp_buf) {
452 isert_err("Unable to allocate isert_conn->login_rspbuf\n");
453 goto out_unmap_login_req_buf;
454 }
455
456 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
457 isert_conn->login_rsp_buf,
458 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
459 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
460 if (ret) {
461 isert_err("login_rsp_dma mapping error: %d\n", ret);
462 isert_conn->login_rsp_dma = 0;
463 goto out_free_login_rsp_buf;
464 }
465
466 return 0;
467
468 out_free_login_rsp_buf:
469 kfree(isert_conn->login_rsp_buf);
470 out_unmap_login_req_buf:
471 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
472 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
473 out_free_login_req_buf:
474 kfree(isert_conn->login_req_buf);
475 return ret;
476 }
477
478 static void
479 isert_set_nego_params(struct isert_conn *isert_conn,
480 struct rdma_conn_param *param)
481 {
482 struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;
483
484 /* Set max inflight RDMA READ requests */
485 isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
486 attr->max_qp_init_rd_atom);
487 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
488
489 if (param->private_data) {
490 u8 flags = *(u8 *)param->private_data;
491
492 /*
493 * use remote invalidation if the both initiator
494 * and the HCA support it
495 */
496 isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
497 (attr->device_cap_flags &
498 IB_DEVICE_MEM_MGT_EXTENSIONS);
499 if (isert_conn->snd_w_inv)
500 isert_info("Using remote invalidation\n");
501 }
502 }
503
504 static int
505 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
506 {
507 struct isert_np *isert_np = cma_id->context;
508 struct iscsi_np *np = isert_np->np;
509 struct isert_conn *isert_conn;
510 struct isert_device *device;
511 int ret = 0;
512
513 spin_lock_bh(&np->np_thread_lock);
514 if (!np->enabled) {
515 spin_unlock_bh(&np->np_thread_lock);
516 isert_dbg("iscsi_np is not enabled, reject connect request\n");
517 return rdma_reject(cma_id, NULL, 0);
518 }
519 spin_unlock_bh(&np->np_thread_lock);
520
521 isert_dbg("cma_id: %p, portal: %p\n",
522 cma_id, cma_id->context);
523
524 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
525 if (!isert_conn)
526 return -ENOMEM;
527
528 isert_init_conn(isert_conn);
529 isert_conn->cm_id = cma_id;
530
531 ret = isert_alloc_login_buf(isert_conn, cma_id->device);
532 if (ret)
533 goto out;
534
535 device = isert_device_get(cma_id);
536 if (IS_ERR(device)) {
537 ret = PTR_ERR(device);
538 goto out_rsp_dma_map;
539 }
540 isert_conn->device = device;
541
542 isert_set_nego_params(isert_conn, &event->param.conn);
543
544 ret = isert_conn_setup_qp(isert_conn, cma_id);
545 if (ret)
546 goto out_conn_dev;
547
548 ret = isert_login_post_recv(isert_conn);
549 if (ret)
550 goto out_conn_dev;
551
552 ret = isert_rdma_accept(isert_conn);
553 if (ret)
554 goto out_conn_dev;
555
556 mutex_lock(&isert_np->mutex);
557 list_add_tail(&isert_conn->node, &isert_np->accepted);
558 mutex_unlock(&isert_np->mutex);
559
560 return 0;
561
562 out_conn_dev:
563 isert_device_put(device);
564 out_rsp_dma_map:
565 isert_free_login_buf(isert_conn);
566 out:
567 kfree(isert_conn);
568 rdma_reject(cma_id, NULL, 0);
569 return ret;
570 }
571
572 static void
573 isert_connect_release(struct isert_conn *isert_conn)
574 {
575 struct isert_device *device = isert_conn->device;
576
577 isert_dbg("conn %p\n", isert_conn);
578
579 BUG_ON(!device);
580
581 isert_free_rx_descriptors(isert_conn);
582 if (isert_conn->cm_id)
583 rdma_destroy_id(isert_conn->cm_id);
584
585 if (isert_conn->qp) {
586 struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;
587
588 isert_comp_put(comp);
589 ib_destroy_qp(isert_conn->qp);
590 }
591
592 if (isert_conn->login_req_buf)
593 isert_free_login_buf(isert_conn);
594
595 isert_device_put(device);
596
597 kfree(isert_conn);
598 }
599
600 static void
601 isert_connected_handler(struct rdma_cm_id *cma_id)
602 {
603 struct isert_conn *isert_conn = cma_id->qp->qp_context;
604 struct isert_np *isert_np = cma_id->context;
605
606 isert_info("conn %p\n", isert_conn);
607
608 mutex_lock(&isert_conn->mutex);
609 isert_conn->state = ISER_CONN_UP;
610 kref_get(&isert_conn->kref);
611 mutex_unlock(&isert_conn->mutex);
612
613 mutex_lock(&isert_np->mutex);
614 list_move_tail(&isert_conn->node, &isert_np->pending);
615 mutex_unlock(&isert_np->mutex);
616
617 isert_info("np %p: Allow accept_np to continue\n", isert_np);
618 up(&isert_np->sem);
619 }
620
621 static void
622 isert_release_kref(struct kref *kref)
623 {
624 struct isert_conn *isert_conn = container_of(kref,
625 struct isert_conn, kref);
626
627 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
628 current->pid);
629
630 isert_connect_release(isert_conn);
631 }
632
633 static void
634 isert_put_conn(struct isert_conn *isert_conn)
635 {
636 kref_put(&isert_conn->kref, isert_release_kref);
637 }
638
639 static void
640 isert_handle_unbound_conn(struct isert_conn *isert_conn)
641 {
642 struct isert_np *isert_np = isert_conn->cm_id->context;
643
644 mutex_lock(&isert_np->mutex);
645 if (!list_empty(&isert_conn->node)) {
646 /*
647 * This means iscsi doesn't know this connection
648 * so schedule a cleanup ourselves
649 */
650 list_del_init(&isert_conn->node);
651 isert_put_conn(isert_conn);
652 queue_work(isert_release_wq, &isert_conn->release_work);
653 }
654 mutex_unlock(&isert_np->mutex);
655 }
656
657 /**
658 * isert_conn_terminate() - Initiate connection termination
659 * @isert_conn: isert connection struct
660 *
661 * Notes:
662 * In case the connection state is BOUND, move state
663 * to TERMINATING and start teardown sequence (rdma_disconnect).
664 * In case the connection state is UP, complete flush as well.
665 *
666 * This routine must be called with mutex held. Thus it is
667 * safe to call multiple times.
668 */
669 static void
670 isert_conn_terminate(struct isert_conn *isert_conn)
671 {
672 int err;
673
674 if (isert_conn->state >= ISER_CONN_TERMINATING)
675 return;
676
677 isert_info("Terminating conn %p state %d\n",
678 isert_conn, isert_conn->state);
679 isert_conn->state = ISER_CONN_TERMINATING;
680 err = rdma_disconnect(isert_conn->cm_id);
681 if (err)
682 isert_warn("Failed rdma_disconnect isert_conn %p\n",
683 isert_conn);
684 }
685
686 static int
687 isert_np_cma_handler(struct isert_np *isert_np,
688 enum rdma_cm_event_type event)
689 {
690 isert_dbg("%s (%d): isert np %p\n",
691 rdma_event_msg(event), event, isert_np);
692
693 switch (event) {
694 case RDMA_CM_EVENT_DEVICE_REMOVAL:
695 isert_np->cm_id = NULL;
696 break;
697 case RDMA_CM_EVENT_ADDR_CHANGE:
698 isert_np->cm_id = isert_setup_id(isert_np);
699 if (IS_ERR(isert_np->cm_id)) {
700 isert_err("isert np %p setup id failed: %ld\n",
701 isert_np, PTR_ERR(isert_np->cm_id));
702 isert_np->cm_id = NULL;
703 }
704 break;
705 default:
706 isert_err("isert np %p Unexpected event %d\n",
707 isert_np, event);
708 }
709
710 return -1;
711 }
712
713 static int
714 isert_disconnected_handler(struct rdma_cm_id *cma_id,
715 enum rdma_cm_event_type event)
716 {
717 struct isert_conn *isert_conn = cma_id->qp->qp_context;
718
719 mutex_lock(&isert_conn->mutex);
720 switch (isert_conn->state) {
721 case ISER_CONN_TERMINATING:
722 break;
723 case ISER_CONN_UP:
724 isert_conn_terminate(isert_conn);
725 ib_drain_qp(isert_conn->qp);
726 isert_handle_unbound_conn(isert_conn);
727 break;
728 case ISER_CONN_BOUND:
729 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
730 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
731 break;
732 default:
733 isert_warn("conn %p teminating in state %d\n",
734 isert_conn, isert_conn->state);
735 }
736 mutex_unlock(&isert_conn->mutex);
737
738 return 0;
739 }
740
741 static int
742 isert_connect_error(struct rdma_cm_id *cma_id)
743 {
744 struct isert_conn *isert_conn = cma_id->qp->qp_context;
745
746 list_del_init(&isert_conn->node);
747 isert_conn->cm_id = NULL;
748 isert_put_conn(isert_conn);
749
750 return -1;
751 }
752
/*
 * Top-level RDMA CM event dispatcher, shared by the listening endpoint and
 * all per-connection cm_ids.  Returns 0 on success; a negative return from
 * isert_connect_error() propagates to the RDMA CM core.
 */
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	int ret = 0;

	isert_info("%s (%d): status %d id %p np %p\n",
		   rdma_event_msg(event->event), event->event,
		   event->status, cma_id, cma_id->context);

	/* Events on the listener id itself are handled by the np handler. */
	if (isert_np->cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event->event);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			isert_err("failed handle connect request %d\n", ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		/* All tear-down flavors funnel into one handler. */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}
793
794 static int
795 isert_post_recvm(struct isert_conn *isert_conn, u32 count)
796 {
797 struct ib_recv_wr *rx_wr, *rx_wr_failed;
798 int i, ret;
799 struct iser_rx_desc *rx_desc;
800
801 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
802 rx_desc = &isert_conn->rx_descs[i];
803
804 rx_wr->wr_cqe = &rx_desc->rx_cqe;
805 rx_wr->sg_list = &rx_desc->rx_sg;
806 rx_wr->num_sge = 1;
807 rx_wr->next = rx_wr + 1;
808 }
809 rx_wr--;
810 rx_wr->next = NULL; /* mark end of work requests list */
811
812 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
813 &rx_wr_failed);
814 if (ret)
815 isert_err("ib_post_recv() failed with ret: %d\n", ret);
816
817 return ret;
818 }
819
820 static int
821 isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
822 {
823 struct ib_recv_wr *rx_wr_failed, rx_wr;
824 int ret;
825
826 rx_wr.wr_cqe = &rx_desc->rx_cqe;
827 rx_wr.sg_list = &rx_desc->rx_sg;
828 rx_wr.num_sge = 1;
829 rx_wr.next = NULL;
830
831 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
832 if (ret)
833 isert_err("ib_post_recv() failed with ret: %d\n", ret);
834
835 return ret;
836 }
837
838 static int
839 isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
840 {
841 struct ib_device *ib_dev = isert_conn->cm_id->device;
842 struct ib_send_wr send_wr, *send_wr_failed;
843 int ret;
844
845 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
846 ISER_HEADERS_LEN, DMA_TO_DEVICE);
847
848 tx_desc->tx_cqe.done = isert_login_send_done;
849
850 send_wr.next = NULL;
851 send_wr.wr_cqe = &tx_desc->tx_cqe;
852 send_wr.sg_list = tx_desc->tx_sg;
853 send_wr.num_sge = tx_desc->num_sge;
854 send_wr.opcode = IB_WR_SEND;
855 send_wr.send_flags = IB_SEND_SIGNALED;
856
857 ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
858 if (ret)
859 isert_err("ib_post_send() failed, ret: %d\n", ret);
860
861 return ret;
862 }
863
864 static void
865 isert_create_send_desc(struct isert_conn *isert_conn,
866 struct isert_cmd *isert_cmd,
867 struct iser_tx_desc *tx_desc)
868 {
869 struct isert_device *device = isert_conn->device;
870 struct ib_device *ib_dev = device->ib_device;
871
872 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
873 ISER_HEADERS_LEN, DMA_TO_DEVICE);
874
875 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
876 tx_desc->iser_header.flags = ISCSI_CTRL;
877
878 tx_desc->num_sge = 1;
879
880 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
881 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
882 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
883 }
884 }
885
886 static int
887 isert_init_tx_hdrs(struct isert_conn *isert_conn,
888 struct iser_tx_desc *tx_desc)
889 {
890 struct isert_device *device = isert_conn->device;
891 struct ib_device *ib_dev = device->ib_device;
892 u64 dma_addr;
893
894 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
895 ISER_HEADERS_LEN, DMA_TO_DEVICE);
896 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
897 isert_err("ib_dma_mapping_error() failed\n");
898 return -ENOMEM;
899 }
900
901 tx_desc->dma_addr = dma_addr;
902 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
903 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
904 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
905
906 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
907 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
908 tx_desc->tx_sg[0].lkey);
909
910 return 0;
911 }
912
913 static void
914 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
915 struct ib_send_wr *send_wr)
916 {
917 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
918
919 tx_desc->tx_cqe.done = isert_send_done;
920 send_wr->wr_cqe = &tx_desc->tx_cqe;
921
922 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
923 send_wr->opcode = IB_WR_SEND_WITH_INV;
924 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
925 } else {
926 send_wr->opcode = IB_WR_SEND;
927 }
928
929 send_wr->sg_list = &tx_desc->tx_sg[0];
930 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
931 send_wr->send_flags = IB_SEND_SIGNALED;
932 }
933
934 static int
935 isert_login_post_recv(struct isert_conn *isert_conn)
936 {
937 struct ib_recv_wr rx_wr, *rx_wr_fail;
938 struct ib_sge sge;
939 int ret;
940
941 memset(&sge, 0, sizeof(struct ib_sge));
942 sge.addr = isert_conn->login_req_dma;
943 sge.length = ISER_RX_PAYLOAD_SIZE;
944 sge.lkey = isert_conn->device->pd->local_dma_lkey;
945
946 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
947 sge.addr, sge.length, sge.lkey);
948
949 isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;
950
951 memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
952 rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
953 rx_wr.sg_list = &sge;
954 rx_wr.num_sge = 1;
955
956 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
957 if (ret)
958 isert_err("ib_post_recv() failed: %d\n", ret);
959
960 return ret;
961 }
962
963 static int
964 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
965 u32 length)
966 {
967 struct isert_conn *isert_conn = conn->context;
968 struct isert_device *device = isert_conn->device;
969 struct ib_device *ib_dev = device->ib_device;
970 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
971 int ret;
972
973 isert_create_send_desc(isert_conn, NULL, tx_desc);
974
975 memcpy(&tx_desc->iscsi_header, &login->rsp[0],
976 sizeof(struct iscsi_hdr));
977
978 isert_init_tx_hdrs(isert_conn, tx_desc);
979
980 if (length > 0) {
981 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];
982
983 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
984 length, DMA_TO_DEVICE);
985
986 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);
987
988 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
989 length, DMA_TO_DEVICE);
990
991 tx_dsg->addr = isert_conn->login_rsp_dma;
992 tx_dsg->length = length;
993 tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
994 tx_desc->num_sge = 2;
995 }
996 if (!login->login_failed) {
997 if (login->login_complete) {
998 ret = isert_alloc_rx_descriptors(isert_conn);
999 if (ret)
1000 return ret;
1001
1002 ret = isert_post_recvm(isert_conn,
1003 ISERT_QP_MAX_RECV_DTOS);
1004 if (ret)
1005 return ret;
1006
1007 /* Now we are in FULL_FEATURE phase */
1008 mutex_lock(&isert_conn->mutex);
1009 isert_conn->state = ISER_CONN_FULL_FEATURE;
1010 mutex_unlock(&isert_conn->mutex);
1011 goto post_send;
1012 }
1013
1014 ret = isert_login_post_recv(isert_conn);
1015 if (ret)
1016 return ret;
1017 }
1018 post_send:
1019 ret = isert_login_post_send(isert_conn, tx_desc);
1020 if (ret)
1021 return ret;
1022
1023 return 0;
1024 }
1025
/*
 * Copy a received login request PDU from the iSER receive descriptor into
 * the iscsit-owned struct iscsi_login, seeding the session parameters from
 * the leading PDU, then kick the login state machine.
 */
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		/* Current-stage bits live at 0x0C in the flags byte. */
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		/* ISID is a fixed 6-byte field. */
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	/* Clamp the key=value payload to what iscsit can accept. */
	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d "
		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
		  MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		/* Wake the thread blocked in the accept path. */
		complete(&isert_conn->login_comp);
		return;
	}
	/* Subsequent PDUs are handled by the deferred login work. */
	schedule_delayed_work(&conn->login_work, 0);
}
1074
1075 static struct iscsi_cmd
1076 *isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
1077 {
1078 struct isert_conn *isert_conn = conn->context;
1079 struct isert_cmd *isert_cmd;
1080 struct iscsi_cmd *cmd;
1081
1082 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
1083 if (!cmd) {
1084 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
1085 return NULL;
1086 }
1087 isert_cmd = iscsit_priv_cmd(cmd);
1088 isert_cmd->conn = isert_conn;
1089 isert_cmd->iscsi_cmd = cmd;
1090 isert_cmd->rx_desc = rx_desc;
1091
1092 return cmd;
1093 }
1094
/*
 * Handle a received SCSI command PDU: set it up with iscsit, then copy any
 * immediate data from the receive descriptor into the command's data SG
 * before sequencing it.  Returns 0 (errors from iscsit processing are
 * handled internally) or a negative setup error.
 */
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;
	unsigned int data_len;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	/* Snapshot the values iscsit_setup_scsi_cmd() derived. */
	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;
	data_len = cmd->se_cmd.data_length;

	/* Whole transfer arrived as immediate data: point the SG at our
	 * receive buffer instead of allocating pages. */
	if (imm_data && imm_data_len == data_len)
		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		/* iscsit already queued an error response. */
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	if (imm_data_len != data_len) {
		/* Partial immediate data: copy into the allocated SG. */
		sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
		sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
				    &rx_desc->data[0], imm_data_len);
		isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
			  sg_nents, imm_data_len);
	} else {
		/* Full immediate data: zero-copy, SG aliases rx_desc. */
		sg_init_table(&isert_cmd->sg, 1);
		cmd->se_cmd.t_data_sg = &isert_cmd->sg;
		cmd->se_cmd.t_data_nents = 1;
		sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
		isert_dbg("Transfer Immediate imm_data_len: %d\n",
			  imm_data_len);
	}

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		/* Drop the extra reference held for the dumped payload. */
		target_put_sess_cmd(&cmd->se_cmd);

	return 0;
}
1162
/*
 * Handle an unsolicited DataOut PDU: locate the owning command, copy the
 * payload from the receive descriptor into the command's data SG at the
 * current write offset, and re-post the receive buffer.
 *
 * Returns 0 on success (including "no matching cmd"), negative on error.
 */
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
		  "write_data_done: %u, data_length: %u\n",
		  unsol_data_len,  cmd->write_data_done,
		  cmd->se_cmd.data_length);

	/* Resume the SG walk where the previous DataOut left off. */
	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		isert_err("unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
		  sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	/*
	 * multiple data-outs on the same command can arrive -
	 * so post the buffer before hand
	 */
	rc = isert_post_recv(isert_conn, rx_desc);
	if (rc) {
		isert_err("ib_post_recv failed with %d\n", rc);
		return rc;
	}
	return 0;
}
1227
1228 static int
1229 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1230 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1231 unsigned char *buf)
1232 {
1233 struct iscsi_conn *conn = isert_conn->conn;
1234 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1235 int rc;
1236
1237 rc = iscsit_setup_nop_out(conn, cmd, hdr);
1238 if (rc < 0)
1239 return rc;
1240 /*
1241 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1242 */
1243
1244 return iscsit_process_nop_out(conn, cmd, hdr);
1245 }
1246
1247 static int
1248 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1249 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1250 struct iscsi_text *hdr)
1251 {
1252 struct iscsi_conn *conn = isert_conn->conn;
1253 u32 payload_length = ntoh24(hdr->dlength);
1254 int rc;
1255 unsigned char *text_in = NULL;
1256
1257 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1258 if (rc < 0)
1259 return rc;
1260
1261 if (payload_length) {
1262 text_in = kzalloc(payload_length, GFP_KERNEL);
1263 if (!text_in) {
1264 isert_err("Unable to allocate text_in of payload_length: %u\n",
1265 payload_length);
1266 return -ENOMEM;
1267 }
1268 }
1269 cmd->text_in_ptr = text_in;
1270
1271 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1272
1273 return iscsit_process_text_cmd(conn, cmd, hdr);
1274 }
1275
1276 static int
1277 isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1278 uint32_t read_stag, uint64_t read_va,
1279 uint32_t write_stag, uint64_t write_va)
1280 {
1281 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1282 struct iscsi_conn *conn = isert_conn->conn;
1283 struct iscsi_cmd *cmd;
1284 struct isert_cmd *isert_cmd;
1285 int ret = -EINVAL;
1286 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1287
1288 if (conn->sess->sess_ops->SessionType &&
1289 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
1290 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1291 " ignoring\n", opcode);
1292 return 0;
1293 }
1294
1295 switch (opcode) {
1296 case ISCSI_OP_SCSI_CMD:
1297 cmd = isert_allocate_cmd(conn, rx_desc);
1298 if (!cmd)
1299 break;
1300
1301 isert_cmd = iscsit_priv_cmd(cmd);
1302 isert_cmd->read_stag = read_stag;
1303 isert_cmd->read_va = read_va;
1304 isert_cmd->write_stag = write_stag;
1305 isert_cmd->write_va = write_va;
1306 isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;
1307
1308 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
1309 rx_desc, (unsigned char *)hdr);
1310 break;
1311 case ISCSI_OP_NOOP_OUT:
1312 cmd = isert_allocate_cmd(conn, rx_desc);
1313 if (!cmd)
1314 break;
1315
1316 isert_cmd = iscsit_priv_cmd(cmd);
1317 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
1318 rx_desc, (unsigned char *)hdr);
1319 break;
1320 case ISCSI_OP_SCSI_DATA_OUT:
1321 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1322 (unsigned char *)hdr);
1323 break;
1324 case ISCSI_OP_SCSI_TMFUNC:
1325 cmd = isert_allocate_cmd(conn, rx_desc);
1326 if (!cmd)
1327 break;
1328
1329 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1330 (unsigned char *)hdr);
1331 break;
1332 case ISCSI_OP_LOGOUT:
1333 cmd = isert_allocate_cmd(conn, rx_desc);
1334 if (!cmd)
1335 break;
1336
1337 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1338 break;
1339 case ISCSI_OP_TEXT:
1340 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
1341 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1342 else
1343 cmd = isert_allocate_cmd(conn, rx_desc);
1344
1345 if (!cmd)
1346 break;
1347
1348 isert_cmd = iscsit_priv_cmd(cmd);
1349 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
1350 rx_desc, (struct iscsi_text *)hdr);
1351 break;
1352 default:
1353 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1354 dump_stack();
1355 break;
1356 }
1357
1358 return ret;
1359 }
1360
1361 static void
1362 isert_print_wc(struct ib_wc *wc, const char *type)
1363 {
1364 if (wc->status != IB_WC_WR_FLUSH_ERR)
1365 isert_err("%s failure: %s (%d) vend_err %x\n", type,
1366 ib_wc_status_msg(wc->status), wc->status,
1367 wc->vendor_err);
1368 else
1369 isert_dbg("%s failure: %s (%d)\n", type,
1370 ib_wc_status_msg(wc->status), wc->status);
1371 }
1372
/*
 * Receive completion handler for full-feature-phase PDUs: decode the iSER
 * header for remote buffer handles, then dispatch the iSCSI PDU.
 */
static void
isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "recv");
		/* A non-flush error means the QP is broken: reinstate. */
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		return;
	}

	/* Make the DMA'd PDU visible to the CPU before reading it. */
	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);

	isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
		 (int)(wc->byte_len - ISER_HEADERS_LEN));

	/* High nibble of the iSER flags selects the message type. */
	switch (iser_ctrl->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_ctrl->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_ctrl->read_stag);
			read_va = be64_to_cpu(iser_ctrl->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
				  read_stag, (unsigned long long)read_va);
		}
		if (iser_ctrl->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_ctrl->write_stag);
			write_va = be64_to_cpu(iser_ctrl->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
				  write_stag, (unsigned long long)write_va);
		}

		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		isert_err("iSER Hello message\n");
		break;
	default:
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
		break;
	}

	isert_rx_opcode(isert_conn, rx_desc,
			read_stag, read_va, write_stag, write_va);

	/* Hand the buffer back to the device for the next receive. */
	ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
1429
/*
 * Receive completion handler for the dedicated login buffer: record the
 * payload length, process the request if the login exchange is past its
 * first PDU, and wake anyone waiting for a login request.
 */
static void
isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "login recv");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);

	isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;

	if (isert_conn->conn) {
		struct iscsi_login *login = isert_conn->conn->conn_login;

		/* The first request is consumed synchronously by the accept
		 * path; only later PDUs are processed from here. */
		if (login && !login->first_request)
			isert_rx_login_req(isert_conn);
	}

	/* Signal under the mutex so teardown can't miss the completion. */
	mutex_lock(&isert_conn->mutex);
	complete(&isert_conn->login_req_comp);
	mutex_unlock(&isert_conn->mutex);

	ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
1460
1461 static void
1462 isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
1463 {
1464 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
1465 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
1466
1467 if (!cmd->rw.nr_ops)
1468 return;
1469
1470 if (isert_prot_cmd(conn, se_cmd)) {
1471 rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
1472 conn->cm_id->port_num, se_cmd->t_data_sg,
1473 se_cmd->t_data_nents, se_cmd->t_prot_sg,
1474 se_cmd->t_prot_nents, dir);
1475 } else {
1476 rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
1477 se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
1478 }
1479
1480 cmd->rw.nr_ops = 0;
1481 }
1482
/*
 * Final put for an isert command: unlink it from the connection list and
 * release it through the path appropriate for its opcode.  @comp_err is
 * true when called from an errored completion, which requires dropping an
 * extra se_cmd reference for in-flight WRITEs.
 */
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd);
			}
		}

		isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
			break;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}
1559
1560 static void
1561 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1562 {
1563 if (tx_desc->dma_addr != 0) {
1564 isert_dbg("unmap single for tx_desc->dma_addr\n");
1565 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1566 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1567 tx_desc->dma_addr = 0;
1568 }
1569 }
1570
1571 static void
1572 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1573 struct ib_device *ib_dev, bool comp_err)
1574 {
1575 if (isert_cmd->pdu_buf_dma != 0) {
1576 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
1577 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1578 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1579 isert_cmd->pdu_buf_dma = 0;
1580 }
1581
1582 isert_unmap_tx_desc(tx_desc, ib_dev);
1583 isert_put_cmd(isert_cmd, comp_err);
1584 }
1585
1586 static int
1587 isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1588 {
1589 struct ib_mr_status mr_status;
1590 int ret;
1591
1592 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1593 if (ret) {
1594 isert_err("ib_check_mr_status failed, ret %d\n", ret);
1595 goto fail_mr_status;
1596 }
1597
1598 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1599 u64 sec_offset_err;
1600 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
1601
1602 switch (mr_status.sig_err.err_type) {
1603 case IB_SIG_BAD_GUARD:
1604 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1605 break;
1606 case IB_SIG_BAD_REFTAG:
1607 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1608 break;
1609 case IB_SIG_BAD_APPTAG:
1610 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
1611 break;
1612 }
1613 sec_offset_err = mr_status.sig_err.sig_err_offset;
1614 do_div(sec_offset_err, block_size);
1615 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1616
1617 isert_err("PI error found type %d at sector 0x%llx "
1618 "expected 0x%x vs actual 0x%x\n",
1619 mr_status.sig_err.err_type,
1620 (unsigned long long)se_cmd->bad_sector,
1621 mr_status.sig_err.expected,
1622 mr_status.sig_err.actual);
1623 ret = 1;
1624 }
1625
1626 fail_mr_status:
1627 return ret;
1628 }
1629
/*
 * Completion handler for the RDMA WRITE carrying READ data to the
 * initiator: verify PI status, tear down the rw context, and either send
 * the SCSI response or a check condition on PI failure.
 */
static void
isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma write");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		/* Errored completion: release the command immediately. */
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);

	if (ret)
		transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
	else
		isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
}
1658
/*
 * Completion handler for the RDMA READ that fetched WRITE data from the
 * initiator: verify PI status (for protected commands), mark all data
 * received, and kick command execution or error reporting.
 */
static void
isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma read");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		/* Errored completion: release the command immediately. */
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	iscsit_stop_dataout_timer(cmd);

	/* Only protected commands carry a signature MR to check. */
	if (isert_prot_cmd(isert_conn, se_cmd))
		ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
	cmd->write_data_done = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret) {
		/* PI error: drop the execution reference and report. */
		target_put_sess_cmd(se_cmd);
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	} else {
		target_execute_cmd(se_cmd);
	}
}
1701
/*
 * Deferred (workqueue) completion handling for control PDUs whose
 * post-processing may sleep (TMR and logout post handlers).
 */
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
		/* no break: TMR deliberately falls into the common
		 * SENT_STATUS + put path below */
	case ISTATE_SEND_REJECT:   /* FALLTHRU */
	case ISTATE_SEND_TEXTRSP:  /* FALLTHRU */
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}
1731
1732 static void
1733 isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
1734 {
1735 struct isert_conn *isert_conn = wc->qp->qp_context;
1736 struct ib_device *ib_dev = isert_conn->cm_id->device;
1737 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
1738
1739 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1740 isert_print_wc(wc, "login send");
1741 if (wc->status != IB_WC_WR_FLUSH_ERR)
1742 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1743 }
1744
1745 isert_unmap_tx_desc(tx_desc, ib_dev);
1746 }
1747
/*
 * Generic send completion: control responses whose post handlers may
 * sleep are punted to a workqueue; everything else is released inline.
 */
static void
isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "send");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (isert_cmd->iscsi_cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
	case ISTATE_SEND_LOGOUTRSP:
	case ISTATE_SEND_REJECT:
	case ISTATE_SEND_TEXTRSP:
		isert_unmap_tx_desc(tx_desc, ib_dev);

		/* Post handlers may sleep — defer to the completion wq. */
		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	default:
		isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
		break;
	}
}
1782
1783 static int
1784 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1785 {
1786 struct ib_send_wr *wr_failed;
1787 int ret;
1788
1789 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
1790 if (ret) {
1791 isert_err("ib_post_recv failed with %d\n", ret);
1792 return ret;
1793 }
1794
1795 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
1796 &wr_failed);
1797 if (ret) {
1798 isert_err("ib_post_send failed with %d\n", ret);
1799 return ret;
1800 }
1801 return ret;
1802 }
1803
1804 static int
1805 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1806 {
1807 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1808 struct isert_conn *isert_conn = conn->context;
1809 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1810 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1811 &isert_cmd->tx_desc.iscsi_header;
1812
1813 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1814 iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1815 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1816 /*
1817 * Attach SENSE DATA payload to iSCSI Response PDU
1818 */
1819 if (cmd->se_cmd.sense_buffer &&
1820 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1821 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1822 struct isert_device *device = isert_conn->device;
1823 struct ib_device *ib_dev = device->ib_device;
1824 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1825 u32 padding, pdu_len;
1826
1827 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1828 cmd->sense_buffer);
1829 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1830
1831 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1832 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
1833 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
1834
1835 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1836 (void *)cmd->sense_buffer, pdu_len,
1837 DMA_TO_DEVICE);
1838
1839 isert_cmd->pdu_buf_len = pdu_len;
1840 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1841 tx_dsg->length = pdu_len;
1842 tx_dsg->lkey = device->pd->local_dma_lkey;
1843 isert_cmd->tx_desc.num_sge = 2;
1844 }
1845
1846 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1847
1848 isert_dbg("Posting SCSI Response\n");
1849
1850 return isert_post_response(isert_conn, isert_cmd);
1851 }
1852
1853 static void
1854 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1855 {
1856 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1857 struct isert_conn *isert_conn = conn->context;
1858
1859 spin_lock_bh(&conn->cmd_lock);
1860 if (!list_empty(&cmd->i_conn_node))
1861 list_del_init(&cmd->i_conn_node);
1862 spin_unlock_bh(&conn->cmd_lock);
1863
1864 if (cmd->data_direction == DMA_TO_DEVICE)
1865 iscsit_stop_dataout_timer(cmd);
1866 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1867 }
1868
1869 static enum target_prot_op
1870 isert_get_sup_prot_ops(struct iscsi_conn *conn)
1871 {
1872 struct isert_conn *isert_conn = conn->context;
1873 struct isert_device *device = isert_conn->device;
1874
1875 if (conn->tpg->tpg_attrib.t10_pi) {
1876 if (device->pi_capable) {
1877 isert_info("conn %p PI offload enabled\n", isert_conn);
1878 isert_conn->pi_support = true;
1879 return TARGET_PROT_ALL;
1880 }
1881 }
1882
1883 isert_info("conn %p PI offload disabled\n", isert_conn);
1884 isert_conn->pi_support = false;
1885
1886 return TARGET_PROT_NORMAL;
1887 }
1888
1889 static int
1890 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1891 bool nopout_response)
1892 {
1893 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1894 struct isert_conn *isert_conn = conn->context;
1895 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1896
1897 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1898 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
1899 &isert_cmd->tx_desc.iscsi_header,
1900 nopout_response);
1901 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1902 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1903
1904 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
1905
1906 return isert_post_response(isert_conn, isert_cmd);
1907 }
1908
1909 static int
1910 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1911 {
1912 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1913 struct isert_conn *isert_conn = conn->context;
1914 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1915
1916 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1917 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
1918 &isert_cmd->tx_desc.iscsi_header);
1919 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1920 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1921
1922 isert_dbg("conn %p Posting Logout Response\n", isert_conn);
1923
1924 return isert_post_response(isert_conn, isert_cmd);
1925 }
1926
1927 static int
1928 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1929 {
1930 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1931 struct isert_conn *isert_conn = conn->context;
1932 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1933
1934 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1935 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
1936 &isert_cmd->tx_desc.iscsi_header);
1937 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1938 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1939
1940 isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
1941
1942 return isert_post_response(isert_conn, isert_cmd);
1943 }
1944
1945 static int
1946 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1947 {
1948 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1949 struct isert_conn *isert_conn = conn->context;
1950 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1951 struct isert_device *device = isert_conn->device;
1952 struct ib_device *ib_dev = device->ib_device;
1953 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1954 struct iscsi_reject *hdr =
1955 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
1956
1957 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1958 iscsit_build_reject(cmd, conn, hdr);
1959 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1960
1961 hton24(hdr->dlength, ISCSI_HDR_LEN);
1962 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1963 (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
1964 DMA_TO_DEVICE);
1965 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
1966 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1967 tx_dsg->length = ISCSI_HDR_LEN;
1968 tx_dsg->lkey = device->pd->local_dma_lkey;
1969 isert_cmd->tx_desc.num_sge = 2;
1970
1971 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1972
1973 isert_dbg("conn %p Posting Reject\n", isert_conn);
1974
1975 return isert_post_response(isert_conn, isert_cmd);
1976 }
1977
1978 static int
1979 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1980 {
1981 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1982 struct isert_conn *isert_conn = conn->context;
1983 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1984 struct iscsi_text_rsp *hdr =
1985 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1986 u32 txt_rsp_len;
1987 int rc;
1988
1989 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1990 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
1991 if (rc < 0)
1992 return rc;
1993
1994 txt_rsp_len = rc;
1995 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1996
1997 if (txt_rsp_len) {
1998 struct isert_device *device = isert_conn->device;
1999 struct ib_device *ib_dev = device->ib_device;
2000 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2001 void *txt_rsp_buf = cmd->buf_ptr;
2002
2003 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2004 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
2005
2006 isert_cmd->pdu_buf_len = txt_rsp_len;
2007 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2008 tx_dsg->length = txt_rsp_len;
2009 tx_dsg->lkey = device->pd->local_dma_lkey;
2010 isert_cmd->tx_desc.num_sge = 2;
2011 }
2012 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2013
2014 isert_dbg("conn %p Text Response\n", isert_conn);
2015
2016 return isert_post_response(isert_conn, isert_cmd);
2017 }
2018
2019 static inline void
2020 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
2021 struct ib_sig_domain *domain)
2022 {
2023 domain->sig_type = IB_SIG_TYPE_T10_DIF;
2024 domain->sig.dif.bg_type = IB_T10DIF_CRC;
2025 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
2026 domain->sig.dif.ref_tag = se_cmd->reftag_seed;
2027 /*
2028 * At the moment we hard code those, but if in the future
2029 * the target core would like to use it, we will take it
2030 * from se_cmd.
2031 */
2032 domain->sig.dif.apptag_check_mask = 0xffff;
2033 domain->sig.dif.app_escape = true;
2034 domain->sig.dif.ref_escape = true;
2035 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
2036 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
2037 domain->sig.dif.ref_remap = true;
2038 };
2039
2040 static int
2041 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2042 {
2043 memset(sig_attrs, 0, sizeof(*sig_attrs));
2044
2045 switch (se_cmd->prot_op) {
2046 case TARGET_PROT_DIN_INSERT:
2047 case TARGET_PROT_DOUT_STRIP:
2048 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
2049 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2050 break;
2051 case TARGET_PROT_DOUT_INSERT:
2052 case TARGET_PROT_DIN_STRIP:
2053 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
2054 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2055 break;
2056 case TARGET_PROT_DIN_PASS:
2057 case TARGET_PROT_DOUT_PASS:
2058 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2059 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2060 break;
2061 default:
2062 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
2063 return -EINVAL;
2064 }
2065
2066 sig_attrs->check_mask =
2067 (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2068 (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
2069 (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
2070 return 0;
2071 }
2072
/*
 * Prepare and post the RDMA R/W context for @cmd.
 *
 * The DMA direction is the reverse of the SCSI data direction
 * (target_reverse_dma_direction()): DMA_FROM_DEVICE here means the
 * target pushes Data-In to the initiator's write_va/write_stag; the
 * other direction pulls Data-Out from read_va/read_stag at offset 0.
 * T10-PI commands additionally carry signature attributes and must not
 * be partially transferred (hence the WARN on a non-zero offset).
 *
 * Returns a negative errno on failure; non-negative on success
 * (presumably the number of posted WRs from rdma_rw_ctx_post() — confirm
 * against the rdma_rw API).
 */
static int
isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
		       struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
	u8 port_num = conn->cm_id->port_num;
	u64 addr;
	u32 rkey, offset;
	int ret;

	if (dir == DMA_FROM_DEVICE) {
		/* Data-In: resume at whatever was already transferred. */
		addr = cmd->write_va;
		rkey = cmd->write_stag;
		offset = cmd->iscsi_cmd->write_data_done;
	} else {
		addr = cmd->read_va;
		rkey = cmd->read_stag;
		offset = 0;
	}

	if (isert_prot_cmd(conn, se_cmd)) {
		/* sig_attrs only needs to live until the init call returns. */
		struct ib_sig_attrs sig_attrs;

		ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
		if (ret)
			return ret;

		WARN_ON_ONCE(offset);
		ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				se_cmd->t_prot_sg, se_cmd->t_prot_nents,
				&sig_attrs, addr, rkey, dir);
	} else {
		ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				offset, addr, rkey, dir);
	}
	if (ret < 0) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
		return ret;
	}

	ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
	if (ret < 0)
		isert_err("Cmd: %p failed to post RDMA res\n", cmd);
	return ret;
}
2121
/*
 * Queue Data-In for @cmd as RDMA WRITEs.  For non-PI commands the SCSI
 * response PDU is built up front and chained behind the RDMA WRITEs so
 * the whole exchange is posted in one go; for PI commands only the RDMA
 * is posted and the response is sent from isert_rdma_write_done().
 *
 * Always returns 1 (or the isert_post_recv() errno); the transport
 * contract apparently treats 1 as "response queued" — confirm against
 * iscsit_queue_data_in callers.
 */
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_cqe *cqe = NULL;
	struct ib_send_wr *chain_wr = NULL;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		/* PI: completion handler will send the response PDU. */
		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
		cqe = &isert_cmd->tx_desc.tx_cqe;
	} else {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);

		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
		if (rc) {
			isert_err("ib_post_recv failed with %d\n", rc);
			return rc;
		}

		chain_wr = &isert_cmd->tx_desc.send_wr;
	}

	/*
	 * NOTE(review): the return value of isert_rdma_rw_ctx_post() is
	 * ignored here; a posting failure is only reported via its own
	 * isert_err().  Consider propagating it.
	 */
	isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
	isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
	return 1;
}
2163
/*
 * Fetch Data-Out for a WRITE command by posting RDMA READs from the
 * initiator's buffer; completion is handled in isert_rdma_read_done().
 * @recovery is unused by this transport.
 *
 * NOTE(review): the return value of isert_rdma_rw_ctx_post() is ignored;
 * a posting failure is only logged inside that helper.
 */
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);

	isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
	isert_rdma_rw_ctx_post(isert_cmd, conn->context,
			       &isert_cmd->tx_desc.tx_cqe, NULL);

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		  isert_cmd);
	return 0;
}
2180
2181 static int
2182 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2183 {
2184 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2185 int ret = 0;
2186
2187 switch (state) {
2188 case ISTATE_REMOVE:
2189 spin_lock_bh(&conn->cmd_lock);
2190 list_del_init(&cmd->i_conn_node);
2191 spin_unlock_bh(&conn->cmd_lock);
2192 isert_put_cmd(isert_cmd, true);
2193 break;
2194 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2195 ret = isert_put_nopin(cmd, conn, false);
2196 break;
2197 default:
2198 isert_err("Unknown immediate state: 0x%02x\n", state);
2199 ret = -EINVAL;
2200 break;
2201 }
2202
2203 return ret;
2204 }
2205
2206 static int
2207 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2208 {
2209 struct isert_conn *isert_conn = conn->context;
2210 int ret;
2211
2212 switch (state) {
2213 case ISTATE_SEND_LOGOUTRSP:
2214 ret = isert_put_logout_rsp(cmd, conn);
2215 if (!ret)
2216 isert_conn->logout_posted = true;
2217 break;
2218 case ISTATE_SEND_NOPIN:
2219 ret = isert_put_nopin(cmd, conn, true);
2220 break;
2221 case ISTATE_SEND_TASKMGTRSP:
2222 ret = isert_put_tm_rsp(cmd, conn);
2223 break;
2224 case ISTATE_SEND_REJECT:
2225 ret = isert_put_reject(cmd, conn);
2226 break;
2227 case ISTATE_SEND_TEXTRSP:
2228 ret = isert_put_text_rsp(cmd, conn);
2229 break;
2230 case ISTATE_SEND_STATUS:
2231 /*
2232 * Special case for sending non GOOD SCSI status from TX thread
2233 * context during pre se_cmd excecution failure.
2234 */
2235 ret = isert_put_response(conn, cmd);
2236 break;
2237 default:
2238 isert_err("Unknown response state: 0x%02x\n", state);
2239 ret = -EINVAL;
2240 break;
2241 }
2242
2243 return ret;
2244 }
2245
2246 struct rdma_cm_id *
2247 isert_setup_id(struct isert_np *isert_np)
2248 {
2249 struct iscsi_np *np = isert_np->np;
2250 struct rdma_cm_id *id;
2251 struct sockaddr *sa;
2252 int ret;
2253
2254 sa = (struct sockaddr *)&np->np_sockaddr;
2255 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
2256
2257 id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
2258 RDMA_PS_TCP, IB_QPT_RC);
2259 if (IS_ERR(id)) {
2260 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
2261 ret = PTR_ERR(id);
2262 goto out;
2263 }
2264 isert_dbg("id %p context %p\n", id, id->context);
2265
2266 ret = rdma_bind_addr(id, sa);
2267 if (ret) {
2268 isert_err("rdma_bind_addr() failed: %d\n", ret);
2269 goto out_id;
2270 }
2271
2272 ret = rdma_listen(id, 0);
2273 if (ret) {
2274 isert_err("rdma_listen() failed: %d\n", ret);
2275 goto out_id;
2276 }
2277
2278 return id;
2279 out_id:
2280 rdma_destroy_id(id);
2281 out:
2282 return ERR_PTR(ret);
2283 }
2284
2285 static int
2286 isert_setup_np(struct iscsi_np *np,
2287 struct sockaddr_storage *ksockaddr)
2288 {
2289 struct isert_np *isert_np;
2290 struct rdma_cm_id *isert_lid;
2291 int ret;
2292
2293 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
2294 if (!isert_np) {
2295 isert_err("Unable to allocate struct isert_np\n");
2296 return -ENOMEM;
2297 }
2298 sema_init(&isert_np->sem, 0);
2299 mutex_init(&isert_np->mutex);
2300 INIT_LIST_HEAD(&isert_np->accepted);
2301 INIT_LIST_HEAD(&isert_np->pending);
2302 isert_np->np = np;
2303
2304 /*
2305 * Setup the np->np_sockaddr from the passed sockaddr setup
2306 * in iscsi_target_configfs.c code..
2307 */
2308 memcpy(&np->np_sockaddr, ksockaddr,
2309 sizeof(struct sockaddr_storage));
2310
2311 isert_lid = isert_setup_id(isert_np);
2312 if (IS_ERR(isert_lid)) {
2313 ret = PTR_ERR(isert_lid);
2314 goto out;
2315 }
2316
2317 isert_np->cm_id = isert_lid;
2318 np->np_context = isert_np;
2319
2320 return 0;
2321
2322 out:
2323 kfree(isert_np);
2324
2325 return ret;
2326 }
2327
2328 static int
2329 isert_rdma_accept(struct isert_conn *isert_conn)
2330 {
2331 struct rdma_cm_id *cm_id = isert_conn->cm_id;
2332 struct rdma_conn_param cp;
2333 int ret;
2334 struct iser_cm_hdr rsp_hdr;
2335
2336 memset(&cp, 0, sizeof(struct rdma_conn_param));
2337 cp.initiator_depth = isert_conn->initiator_depth;
2338 cp.retry_count = 7;
2339 cp.rnr_retry_count = 7;
2340
2341 memset(&rsp_hdr, 0, sizeof(rsp_hdr));
2342 rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
2343 if (!isert_conn->snd_w_inv)
2344 rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED;
2345 cp.private_data = (void *)&rsp_hdr;
2346 cp.private_data_len = sizeof(rsp_hdr);
2347
2348 ret = rdma_accept(cm_id, &cp);
2349 if (ret) {
2350 isert_err("rdma_accept() failed with: %d\n", ret);
2351 return ret;
2352 }
2353
2354 return 0;
2355 }
2356
/*
 * Transport hook called by the login thread to obtain the next login
 * request PDU.  Blocks until the RX path signals login_req_comp, then —
 * for the first request only — hands the PDU to isert_rx_login_req()
 * and waits for login_comp before returning.
 *
 * Returns 0 on success or the -ERESTARTSYS-style errno if the wait is
 * interrupted.
 */
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	/* Re-arm so the next login PDU can signal us again. */
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}
2392
2393 static void
2394 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
2395 struct isert_conn *isert_conn)
2396 {
2397 struct rdma_cm_id *cm_id = isert_conn->cm_id;
2398 struct rdma_route *cm_route = &cm_id->route;
2399
2400 conn->login_family = np->np_sockaddr.ss_family;
2401
2402 conn->login_sockaddr = cm_route->addr.dst_addr;
2403 conn->local_sockaddr = cm_route->addr.src_addr;
2404 }
2405
/*
 * Transport accept hook: sleep on the portal semaphore until the CM
 * handler queues a pending connection, then bind the first pending
 * isert_conn to @conn.  Loops back to the semaphore if the pending list
 * raced empty.  Returns 0 with conn->context set, or -ENODEV when
 * interrupted or when the np thread is resetting/shutting down.
 */
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	/* Upped by isert_cma_handler when a connection arrives. */
	ret = down_interruptible(&isert_np->sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			  np->np_thread_state);
		/**
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 **/
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->mutex);
	if (list_empty(&isert_np->pending)) {
		/* Raced with teardown; wait for the next wakeup. */
		mutex_unlock(&isert_np->mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->pending,
				      struct isert_conn, node);
	list_del_init(&isert_conn->node);
	mutex_unlock(&isert_np->mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	isert_conn->state = ISER_CONN_BOUND;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}
2451
2452 static void
2453 isert_free_np(struct iscsi_np *np)
2454 {
2455 struct isert_np *isert_np = np->np_context;
2456 struct isert_conn *isert_conn, *n;
2457
2458 if (isert_np->cm_id)
2459 rdma_destroy_id(isert_np->cm_id);
2460
2461 /*
2462 * FIXME: At this point we don't have a good way to insure
2463 * that at this point we don't have hanging connections that
2464 * completed RDMA establishment but didn't start iscsi login
2465 * process. So work-around this by cleaning up what ever piled
2466 * up in accepted and pending lists.
2467 */
2468 mutex_lock(&isert_np->mutex);
2469 if (!list_empty(&isert_np->pending)) {
2470 isert_info("Still have isert pending connections\n");
2471 list_for_each_entry_safe(isert_conn, n,
2472 &isert_np->pending,
2473 node) {
2474 isert_info("cleaning isert_conn %p state (%d)\n",
2475 isert_conn, isert_conn->state);
2476 isert_connect_release(isert_conn);
2477 }
2478 }
2479
2480 if (!list_empty(&isert_np->accepted)) {
2481 isert_info("Still have isert accepted connections\n");
2482 list_for_each_entry_safe(isert_conn, n,
2483 &isert_np->accepted,
2484 node) {
2485 isert_info("cleaning isert_conn %p state (%d)\n",
2486 isert_conn, isert_conn->state);
2487 isert_connect_release(isert_conn);
2488 }
2489 }
2490 mutex_unlock(&isert_np->mutex);
2491
2492 np->np_context = NULL;
2493 kfree(isert_np);
2494 }
2495
2496 static void isert_release_work(struct work_struct *work)
2497 {
2498 struct isert_conn *isert_conn = container_of(work,
2499 struct isert_conn,
2500 release_work);
2501
2502 isert_info("Starting release conn %p\n", isert_conn);
2503
2504 mutex_lock(&isert_conn->mutex);
2505 isert_conn->state = ISER_CONN_DOWN;
2506 mutex_unlock(&isert_conn->mutex);
2507
2508 isert_info("Destroying conn %p\n", isert_conn);
2509 isert_put_conn(isert_conn);
2510 }
2511
2512 static void
2513 isert_wait4logout(struct isert_conn *isert_conn)
2514 {
2515 struct iscsi_conn *conn = isert_conn->conn;
2516
2517 isert_info("conn %p\n", isert_conn);
2518
2519 if (isert_conn->logout_posted) {
2520 isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
2521 wait_for_completion_timeout(&conn->conn_logout_comp,
2522 SECONDS_FOR_LOGOUT_COMP * HZ);
2523 }
2524 }
2525
2526 static void
2527 isert_wait4cmds(struct iscsi_conn *conn)
2528 {
2529 isert_info("iscsi_conn %p\n", conn);
2530
2531 if (conn->sess) {
2532 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
2533 target_wait_for_sess_cmds(conn->sess->se_sess);
2534 }
2535 }
2536
/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 *     unsolicited dataout
 * @conn: iscsi connection
 *
 * We might still have commands that are waiting for unsolicited
 * dataout messages. We must put the extra reference on those
 * before blocking on target_wait_for_sess_cmds().
 */
2546 static void
2547 isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
2548 {
2549 struct iscsi_cmd *cmd, *tmp;
2550 static LIST_HEAD(drop_cmd_list);
2551
2552 spin_lock_bh(&conn->cmd_lock);
2553 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
2554 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
2555 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
2556 (cmd->write_data_done < cmd->se_cmd.data_length))
2557 list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
2558 }
2559 spin_unlock_bh(&conn->cmd_lock);
2560
2561 list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
2562 list_del_init(&cmd->i_conn_node);
2563 if (cmd->i_state != ISTATE_REMOVE) {
2564 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2565
2566 isert_info("conn %p dropping cmd %p\n", conn, cmd);
2567 isert_put_cmd(isert_cmd, true);
2568 }
2569 }
2570 }
2571
/*
 * Transport teardown hook.  Ordering matters here: terminate the CM
 * connection, drain the QP so no further completions can run, drop
 * commands stuck waiting for unsolicited Data-Out, wait for the target
 * core to retire the remaining session commands, give a posted logout
 * response time to complete, and finally hand the last stage to
 * isert_release_work() on the release workqueue.
 */
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	ib_drain_qp(isert_conn->qp);
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}
2589
2590 static void isert_free_conn(struct iscsi_conn *conn)
2591 {
2592 struct isert_conn *isert_conn = conn->context;
2593
2594 ib_drain_qp(isert_conn->qp);
2595 isert_put_conn(isert_conn);
2596 }
2597
/*
 * iscsit RX-thread entry point.  iSER delivers PDUs through CQ
 * completions rather than through this thread, so park it on a private
 * completion that is never signalled.  The interruptible wait
 * presumably returns when the thread is signalled/stopped during
 * connection teardown — confirm against iscsit's RX thread handling.
 */
static void isert_get_rx_pdu(struct iscsi_conn *conn)
{
	struct completion comp;

	init_completion(&comp);

	wait_for_completion_interruptible(&comp);
}
2606
/*
 * iSER transport registration with the iSCSI target core: maps the
 * generic iscsit callbacks onto the RDMA implementations above.
 * priv_size reserves a struct isert_cmd alongside each iscsi_cmd.
 */
static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.rdma_shutdown		= true,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_rx_pdu	= isert_get_rx_pdu,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
2629
2630 static int __init isert_init(void)
2631 {
2632 int ret;
2633
2634 isert_comp_wq = alloc_workqueue("isert_comp_wq",
2635 WQ_UNBOUND | WQ_HIGHPRI, 0);
2636 if (!isert_comp_wq) {
2637 isert_err("Unable to allocate isert_comp_wq\n");
2638 ret = -ENOMEM;
2639 return -ENOMEM;
2640 }
2641
2642 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
2643 WQ_UNBOUND_MAX_ACTIVE);
2644 if (!isert_release_wq) {
2645 isert_err("Unable to allocate isert_release_wq\n");
2646 ret = -ENOMEM;
2647 goto destroy_comp_wq;
2648 }
2649
2650 iscsit_register_transport(&iser_target_transport);
2651 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
2652
2653 return 0;
2654
2655 destroy_comp_wq:
2656 destroy_workqueue(isert_comp_wq);
2657
2658 return ret;
2659 }
2660
/*
 * Module exit.  flush_scheduled_work() drains the system workqueue
 * (used e.g. for conn->login_work via schedule_delayed_work) before the
 * module's private workqueues are destroyed and the transport is
 * unregistered.
 */
static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}
2669
/* Module metadata and init/exit entry points. */
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);
2677
2678
2679
2680
2681
2682 /* LDV_COMMENT_BEGIN_MAIN */
2683 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful
2684
2685 /*###########################################################################*/
2686
2687 /*############## Driver Environment Generator 0.2 output ####################*/
2688
2689 /*###########################################################################*/
2690
2691
2692
2693 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
2694 void ldv_check_final_state(void);
2695
2696 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
2697 void ldv_check_return_value(int res);
2698
2699 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
2700 void ldv_check_return_value_probe(int res);
2701
2702 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
2703 void ldv_initialize(void);
2704
2705 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
2706 void ldv_handler_precall(void);
2707
2708 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
2709 int nondet_int(void);
2710
2711 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
2712 int LDV_IN_INTERRUPT;
2713
2714 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
2715 void ldv_main0_sequence_infinite_withcheck_stateful(void) {
2716
2717
2718
2719 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
2720 /*============================= VARIABLE DECLARATION PART =============================*/
2721 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
2722 /* content: static int isert_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)*/
2723 /* LDV_COMMENT_BEGIN_PREP */
2724 #define ISERT_MAX_CONN 8
2725 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2726 #define ISER_MAX_TX_CQ_LEN \
2727 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2728 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2729 ISERT_MAX_CONN)
2730 /* LDV_COMMENT_END_PREP */
2731 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "isert_setup_np" */
2732 struct iscsi_np * var_group1;
2733 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "isert_setup_np" */
2734 struct sockaddr_storage * var_group2;
2735 /* content: static int isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)*/
2736 /* LDV_COMMENT_BEGIN_PREP */
2737 #define ISERT_MAX_CONN 8
2738 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2739 #define ISER_MAX_TX_CQ_LEN \
2740 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2741 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2742 ISERT_MAX_CONN)
2743 /* LDV_COMMENT_END_PREP */
2744 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "isert_accept_np" */
2745 struct iscsi_conn * var_group3;
2746 /* content: static void isert_free_np(struct iscsi_np *np)*/
2747 /* LDV_COMMENT_BEGIN_PREP */
2748 #define ISERT_MAX_CONN 8
2749 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2750 #define ISER_MAX_TX_CQ_LEN \
2751 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2752 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2753 ISERT_MAX_CONN)
2754 /* LDV_COMMENT_END_PREP */
2755 /* content: static void isert_wait_conn(struct iscsi_conn *conn)*/
2756 /* LDV_COMMENT_BEGIN_PREP */
2757 #define ISERT_MAX_CONN 8
2758 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2759 #define ISER_MAX_TX_CQ_LEN \
2760 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2761 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2762 ISERT_MAX_CONN)
2763 /* LDV_COMMENT_END_PREP */
2764 /* content: static void isert_free_conn(struct iscsi_conn *conn)*/
2765 /* LDV_COMMENT_BEGIN_PREP */
2766 #define ISERT_MAX_CONN 8
2767 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2768 #define ISER_MAX_TX_CQ_LEN \
2769 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2770 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2771 ISERT_MAX_CONN)
2772 /* LDV_COMMENT_END_PREP */
2773 /* content: static int isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)*/
2774 /* LDV_COMMENT_BEGIN_PREP */
2775 #define ISERT_MAX_CONN 8
2776 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2777 #define ISER_MAX_TX_CQ_LEN \
2778 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2779 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2780 ISERT_MAX_CONN)
2781 /* LDV_COMMENT_END_PREP */
2782 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "isert_get_login_rx" */
2783 struct iscsi_login * var_group4;
2784 /* content: static int isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, u32 length)*/
2785 /* LDV_COMMENT_BEGIN_PREP */
2786 #define ISERT_MAX_CONN 8
2787 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2788 #define ISER_MAX_TX_CQ_LEN \
2789 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2790 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2791 ISERT_MAX_CONN)
2792 /* LDV_COMMENT_END_PREP */
2793 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "isert_put_login_tx" */
2794 u32 var_isert_put_login_tx_36_p2;
2795 /* content: static int isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)*/
2796 /* LDV_COMMENT_BEGIN_PREP */
2797 #define ISERT_MAX_CONN 8
2798 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2799 #define ISER_MAX_TX_CQ_LEN \
2800 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2801 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2802 ISERT_MAX_CONN)
2803 /* LDV_COMMENT_END_PREP */
2804 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "isert_immediate_queue" */
2805 struct iscsi_cmd * var_group5;
2806 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "isert_immediate_queue" */
2807 int var_isert_immediate_queue_71_p2;
2808 /* content: static int isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)*/
2809 /* LDV_COMMENT_BEGIN_PREP */
2810 #define ISERT_MAX_CONN 8
2811 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2812 #define ISER_MAX_TX_CQ_LEN \
2813 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2814 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2815 ISERT_MAX_CONN)
2816 /* LDV_COMMENT_END_PREP */
2817 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "isert_response_queue" */
2818 int var_isert_response_queue_72_p2;
2819 /* content: static int isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)*/
2820 /* LDV_COMMENT_BEGIN_PREP */
2821 #define ISERT_MAX_CONN 8
2822 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2823 #define ISER_MAX_TX_CQ_LEN \
2824 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2825 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2826 ISERT_MAX_CONN)
2827 /* LDV_COMMENT_END_PREP */
2828 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "isert_get_dataout" */
2829 bool var_isert_get_dataout_70_p2;
2830 /* content: static int isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)*/
2831 /* LDV_COMMENT_BEGIN_PREP */
2832 #define ISERT_MAX_CONN 8
2833 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2834 #define ISER_MAX_TX_CQ_LEN \
2835 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2836 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2837 ISERT_MAX_CONN)
2838 /* LDV_COMMENT_END_PREP */
2839 /* content: static int isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)*/
2840 /* LDV_COMMENT_BEGIN_PREP */
2841 #define ISERT_MAX_CONN 8
2842 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2843 #define ISER_MAX_TX_CQ_LEN \
2844 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2845 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2846 ISERT_MAX_CONN)
2847 /* LDV_COMMENT_END_PREP */
2848 /* content: static void isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)*/
2849 /* LDV_COMMENT_BEGIN_PREP */
2850 #define ISERT_MAX_CONN 8
2851 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2852 #define ISER_MAX_TX_CQ_LEN \
2853 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2854 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2855 ISERT_MAX_CONN)
2856 /* LDV_COMMENT_END_PREP */
2857 /* content: static void isert_get_rx_pdu(struct iscsi_conn *conn)*/
2858 /* LDV_COMMENT_BEGIN_PREP */
2859 #define ISERT_MAX_CONN 8
2860 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2861 #define ISER_MAX_TX_CQ_LEN \
2862 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2863 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2864 ISERT_MAX_CONN)
2865 /* LDV_COMMENT_END_PREP */
2866 /* content: static enum target_prot_op isert_get_sup_prot_ops(struct iscsi_conn *conn)*/
2867 /* LDV_COMMENT_BEGIN_PREP */
2868 #define ISERT_MAX_CONN 8
2869 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2870 #define ISER_MAX_TX_CQ_LEN \
2871 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2872 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2873 ISERT_MAX_CONN)
2874 /* LDV_COMMENT_END_PREP */
2875
2876
2877
2878
2879 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
2880 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
2881 /*============================= VARIABLE INITIALIZING PART =============================*/
2882 LDV_IN_INTERRUPT=1;
2883
2884
2885
2886
2887 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
2888 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
2889 /*============================= FUNCTION CALL SECTION =============================*/
2890 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
2891 ldv_initialize();
2892
2893 /** INIT: init_type: ST_MODULE_INIT **/
2894 /* content: static int __init isert_init(void)*/
2895 /* LDV_COMMENT_BEGIN_PREP */
2896 #define ISERT_MAX_CONN 8
2897 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2898 #define ISER_MAX_TX_CQ_LEN \
2899 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2900 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2901 ISERT_MAX_CONN)
2902 /* LDV_COMMENT_END_PREP */
2903 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". */
2904 ldv_handler_precall();
2905 if(isert_init())
2906 goto ldv_final;
2907
2908
2909
2910 while( nondet_int()
2911 ) {
2912
2913 switch(nondet_int()) {
2914
2915 case 0: {
2916
2917 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
2918
2919
2920 /* content: static int isert_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)*/
2921 /* LDV_COMMENT_BEGIN_PREP */
2922 #define ISERT_MAX_CONN 8
2923 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2924 #define ISER_MAX_TX_CQ_LEN \
2925 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2926 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2927 ISERT_MAX_CONN)
2928 /* LDV_COMMENT_END_PREP */
2929 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_setup_np" from driver structure with callbacks "iser_target_transport" */
2930 ldv_handler_precall();
2931 isert_setup_np( var_group1, var_group2);
2932
2933
2934
2935
2936 }
2937
2938 break;
2939 case 1: {
2940
2941 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
2942
2943
2944 /* content: static int isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)*/
2945 /* LDV_COMMENT_BEGIN_PREP */
2946 #define ISERT_MAX_CONN 8
2947 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2948 #define ISER_MAX_TX_CQ_LEN \
2949 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2950 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2951 ISERT_MAX_CONN)
2952 /* LDV_COMMENT_END_PREP */
2953 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_accept_np" from driver structure with callbacks "iser_target_transport" */
2954 ldv_handler_precall();
2955 isert_accept_np( var_group1, var_group3);
2956
2957
2958
2959
2960 }
2961
2962 break;
2963 case 2: {
2964
2965 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
2966
2967
2968 /* content: static void isert_free_np(struct iscsi_np *np)*/
2969 /* LDV_COMMENT_BEGIN_PREP */
2970 #define ISERT_MAX_CONN 8
2971 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2972 #define ISER_MAX_TX_CQ_LEN \
2973 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2974 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2975 ISERT_MAX_CONN)
2976 /* LDV_COMMENT_END_PREP */
2977 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_free_np" from driver structure with callbacks "iser_target_transport" */
2978 ldv_handler_precall();
2979 isert_free_np( var_group1);
2980
2981
2982
2983
2984 }
2985
2986 break;
2987 case 3: {
2988
2989 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
2990
2991
2992 /* content: static void isert_wait_conn(struct iscsi_conn *conn)*/
2993 /* LDV_COMMENT_BEGIN_PREP */
2994 #define ISERT_MAX_CONN 8
2995 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
2996 #define ISER_MAX_TX_CQ_LEN \
2997 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
2998 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
2999 ISERT_MAX_CONN)
3000 /* LDV_COMMENT_END_PREP */
3001 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_wait_conn" from driver structure with callbacks "iser_target_transport" */
3002 ldv_handler_precall();
3003 isert_wait_conn( var_group3);
3004
3005
3006
3007
3008 }
3009
3010 break;
3011 case 4: {
3012
3013 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
3014
3015
3016 /* content: static void isert_free_conn(struct iscsi_conn *conn)*/
3017 /* LDV_COMMENT_BEGIN_PREP */
3018 #define ISERT_MAX_CONN 8
3019 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
3020 #define ISER_MAX_TX_CQ_LEN \
3021 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
3022 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
3023 ISERT_MAX_CONN)
3024 /* LDV_COMMENT_END_PREP */
3025 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_free_conn" from driver structure with callbacks "iser_target_transport" */
3026 ldv_handler_precall();
3027 isert_free_conn( var_group3);
3028
3029
3030
3031
3032 }
3033
3034 break;
3035 case 5: {
3036
3037 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
3038
3039
3040 /* content: static int isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)*/
3041 /* LDV_COMMENT_BEGIN_PREP */
3042 #define ISERT_MAX_CONN 8
3043 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
3044 #define ISER_MAX_TX_CQ_LEN \
3045 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
3046 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
3047 ISERT_MAX_CONN)
3048 /* LDV_COMMENT_END_PREP */
3049 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_get_login_rx" from driver structure with callbacks "iser_target_transport" */
3050 ldv_handler_precall();
3051 isert_get_login_rx( var_group3, var_group4);
3052
3053
3054
3055
3056 }
3057
3058 break;
3059 case 6: {
3060
3061 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
3062
3063
3064 /* content: static int isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, u32 length)*/
3065 /* LDV_COMMENT_BEGIN_PREP */
3066 #define ISERT_MAX_CONN 8
3067 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
3068 #define ISER_MAX_TX_CQ_LEN \
3069 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
3070 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
3071 ISERT_MAX_CONN)
3072 /* LDV_COMMENT_END_PREP */
3073 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_put_login_tx" from driver structure with callbacks "iser_target_transport" */
3074 ldv_handler_precall();
3075 isert_put_login_tx( var_group3, var_group4, var_isert_put_login_tx_36_p2);
3076
3077
3078
3079
3080 }
3081
3082 break;
3083 case 7: {
3084
3085 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
3086
3087
3088 /* content: static int isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)*/
3089 /* LDV_COMMENT_BEGIN_PREP */
3090 #define ISERT_MAX_CONN 8
3091 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
3092 #define ISER_MAX_TX_CQ_LEN \
3093 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
3094 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
3095 ISERT_MAX_CONN)
3096 /* LDV_COMMENT_END_PREP */
3097 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_immediate_queue" from driver structure with callbacks "iser_target_transport" */
3098 ldv_handler_precall();
3099 isert_immediate_queue( var_group3, var_group5, var_isert_immediate_queue_71_p2);
3100
3101
3102
3103
3104 }
3105
3106 break;
3107 case 8: {
3108
3109 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
3110
3111
3112 /* content: static int isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)*/
3113 /* LDV_COMMENT_BEGIN_PREP */
3114 #define ISERT_MAX_CONN 8
3115 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
3116 #define ISER_MAX_TX_CQ_LEN \
3117 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
3118 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
3119 ISERT_MAX_CONN)
3120 /* LDV_COMMENT_END_PREP */
3121 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_response_queue" from driver structure with callbacks "iser_target_transport" */
3122 ldv_handler_precall();
3123 isert_response_queue( var_group3, var_group5, var_isert_response_queue_72_p2);
3124
3125
3126
3127
3128 }
3129
3130 break;
3131 case 9: {
3132
3133 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
3134
3135
3136 /* content: static int isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)*/
3137 /* LDV_COMMENT_BEGIN_PREP */
3138 #define ISERT_MAX_CONN 8
3139 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
3140 #define ISER_MAX_TX_CQ_LEN \
3141 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
3142 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
3143 ISERT_MAX_CONN)
3144 /* LDV_COMMENT_END_PREP */
3145 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_get_dataout" from driver structure with callbacks "iser_target_transport" */
3146 ldv_handler_precall();
3147 isert_get_dataout( var_group3, var_group5, var_isert_get_dataout_70_p2);
3148
3149
3150
3151
3152 }
3153
3154 break;
3155 case 10: {
3156
3157 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
3158
3159
3160 /* content: static int isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)*/
3161 /* LDV_COMMENT_BEGIN_PREP */
3162 #define ISERT_MAX_CONN 8
3163 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
3164 #define ISER_MAX_TX_CQ_LEN \
3165 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
3166 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
3167 ISERT_MAX_CONN)
3168 /* LDV_COMMENT_END_PREP */
3169 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_queue_data_in" from driver structure with callbacks "iser_target_transport" */
3170 ldv_handler_precall();
3171 isert_put_datain( var_group3, var_group5);
3172
3173
3174
3175
3176 }
3177
3178 break;
3179 case 11: {
3180
3181 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
3182
3183
3184 /* content: static int isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)*/
3185 /* LDV_COMMENT_BEGIN_PREP */
3186 #define ISERT_MAX_CONN 8
3187 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
3188 #define ISER_MAX_TX_CQ_LEN \
3189 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
3190 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
3191 ISERT_MAX_CONN)
3192 /* LDV_COMMENT_END_PREP */
3193 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_queue_status" from driver structure with callbacks "iser_target_transport" */
3194 ldv_handler_precall();
3195 isert_put_response( var_group3, var_group5);
3196
3197
3198
3199
3200 }
3201
3202 break;
3203 case 12: {
3204
3205 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
3206
3207
3208 /* content: static void isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)*/
3209 /* LDV_COMMENT_BEGIN_PREP */
3210 #define ISERT_MAX_CONN 8
3211 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
3212 #define ISER_MAX_TX_CQ_LEN \
3213 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
3214 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
3215 ISERT_MAX_CONN)
3216 /* LDV_COMMENT_END_PREP */
3217 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_aborted_task" from driver structure with callbacks "iser_target_transport" */
3218 ldv_handler_precall();
3219 isert_aborted_task( var_group3, var_group5);
3220
3221
3222
3223
3224 }
3225
3226 break;
3227 case 13: {
3228
3229 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
3230
3231
3232 /* content: static void isert_get_rx_pdu(struct iscsi_conn *conn)*/
3233 /* LDV_COMMENT_BEGIN_PREP */
3234 #define ISERT_MAX_CONN 8
3235 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
3236 #define ISER_MAX_TX_CQ_LEN \
3237 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
3238 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
3239 ISERT_MAX_CONN)
3240 /* LDV_COMMENT_END_PREP */
3241 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_get_rx_pdu" from driver structure with callbacks "iser_target_transport" */
3242 ldv_handler_precall();
3243 isert_get_rx_pdu( var_group3);
3244
3245
3246
3247
3248 }
3249
3250 break;
3251 case 14: {
3252
3253 /** STRUCT: struct type: iscsit_transport, struct name: iser_target_transport **/
3254
3255
3256 /* content: static enum target_prot_op isert_get_sup_prot_ops(struct iscsi_conn *conn)*/
3257 /* LDV_COMMENT_BEGIN_PREP */
3258 #define ISERT_MAX_CONN 8
3259 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
3260 #define ISER_MAX_TX_CQ_LEN \
3261 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
3262 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
3263 ISERT_MAX_CONN)
3264 /* LDV_COMMENT_END_PREP */
3265 /* LDV_COMMENT_FUNCTION_CALL Function from field "iscsit_get_sup_prot_ops" from driver structure with callbacks "iser_target_transport" */
3266 ldv_handler_precall();
3267 isert_get_sup_prot_ops( var_group3);
3268
3269
3270
3271
3272 }
3273
3274 break;
3275 default: break;
3276
3277 }
3278
3279 }
3280
3281 ldv_module_exit:
3282
3283 /** INIT: init_type: ST_MODULE_EXIT **/
3284 /* content: static void __exit isert_exit(void)*/
3285 /* LDV_COMMENT_BEGIN_PREP */
3286 #define ISERT_MAX_CONN 8
3287 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
3288 #define ISER_MAX_TX_CQ_LEN \
3289 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
3290 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
3291 ISERT_MAX_CONN)
3292 /* LDV_COMMENT_END_PREP */
3293 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be uploaded from kernel. This function declared as "MODULE_EXIT(function name)". */
3294 ldv_handler_precall();
3295 isert_exit();
3296
3297 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
3298 ldv_final: ldv_check_final_state();
3299
3300 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
3301 return;
3302
3303 }
3304 #endif
3305
3306 /* LDV_COMMENT_END_MAIN */ 1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8 extern void ldv_dma_map_page(void);
9 extern void ldv_dma_mapping_error(void);
10 #line 1 "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/1067/dscv_tempdir/dscv/ri/331_1a/drivers/infiniband/ulp/isert/ib_isert.c"
11
12 /*******************************************************************************
13 * This file contains iSCSI extentions for RDMA (iSER) Verbs
14 *
15 * (c) Copyright 2013 Datera, Inc.
16 *
17 * Nicholas A. Bellinger <nab@linux-iscsi.org>
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
28 ****************************************************************************/
29
30 #include <linux/string.h>
31 #include <linux/module.h>
32 #include <linux/scatterlist.h>
33 #include <linux/socket.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <rdma/ib_verbs.h>
37 #include <rdma/rdma_cm.h>
38 #include <target/target_core_base.h>
39 #include <target/target_core_fabric.h>
40 #include <target/iscsi/iscsi_transport.h>
41 #include <linux/semaphore.h>
42
43 #include "ib_isert.h"
44
45 #define ISERT_MAX_CONN 8
46 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
47 #define ISER_MAX_TX_CQ_LEN \
48 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
49 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
50 ISERT_MAX_CONN)
51
/* Module parameter: set debug_level > 0 to enable debug tracing. */
static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

/* Protects device_list and the per-device comp active_qps counters. */
static DEFINE_MUTEX(device_list_mutex);
/* All isert_device instances, one per underlying IB device. */
static LIST_HEAD(device_list);
/* Workqueues created at module init (definitions not visible here). */
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;

/* Forward declarations for functions defined later in this file. */
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_login_post_recv(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static void isert_release_work(struct work_struct *work);
/* CQ completion handlers (ib_cqe.done callbacks). */
static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
74
75 static inline bool
76 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
77 {
78 return (conn->pi_support &&
79 cmd->prot_op != TARGET_PROT_NORMAL);
80 }
81
82
83 static void
84 isert_qp_event_callback(struct ib_event *e, void *context)
85 {
86 struct isert_conn *isert_conn = context;
87
88 isert_err("%s (%d): conn %p\n",
89 ib_event_msg(e->event), e->event, isert_conn);
90
91 switch (e->event) {
92 case IB_EVENT_COMM_EST:
93 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
94 break;
95 case IB_EVENT_QP_LAST_WQE_REACHED:
96 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
97 break;
98 default:
99 break;
100 }
101 }
102
103 static struct isert_comp *
104 isert_comp_get(struct isert_conn *isert_conn)
105 {
106 struct isert_device *device = isert_conn->device;
107 struct isert_comp *comp;
108 int i, min = 0;
109
110 mutex_lock(&device_list_mutex);
111 for (i = 0; i < device->comps_used; i++)
112 if (device->comps[i].active_qps <
113 device->comps[min].active_qps)
114 min = i;
115 comp = &device->comps[min];
116 comp->active_qps++;
117 mutex_unlock(&device_list_mutex);
118
119 isert_info("conn %p, using comp %p min_index: %d\n",
120 isert_conn, comp, min);
121
122 return comp;
123 }
124
125 static void
126 isert_comp_put(struct isert_comp *comp)
127 {
128 mutex_lock(&device_list_mutex);
129 comp->active_qps--;
130 mutex_unlock(&device_list_mutex);
131 }
132
133 static struct ib_qp *
134 isert_create_qp(struct isert_conn *isert_conn,
135 struct isert_comp *comp,
136 struct rdma_cm_id *cma_id)
137 {
138 struct isert_device *device = isert_conn->device;
139 struct ib_qp_init_attr attr;
140 int ret;
141
142 memset(&attr, 0, sizeof(struct ib_qp_init_attr));
143 attr.event_handler = isert_qp_event_callback;
144 attr.qp_context = isert_conn;
145 attr.send_cq = comp->cq;
146 attr.recv_cq = comp->cq;
147 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
148 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
149 attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
150 attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
151 attr.cap.max_recv_sge = 1;
152 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
153 attr.qp_type = IB_QPT_RC;
154 if (device->pi_capable)
155 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
156
157 ret = rdma_create_qp(cma_id, device->pd, &attr);
158 if (ret) {
159 isert_err("rdma_create_qp failed for cma_id %d\n", ret);
160 return ERR_PTR(ret);
161 }
162
163 return cma_id->qp;
164 }
165
166 static int
167 isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
168 {
169 struct isert_comp *comp;
170 int ret;
171
172 comp = isert_comp_get(isert_conn);
173 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
174 if (IS_ERR(isert_conn->qp)) {
175 ret = PTR_ERR(isert_conn->qp);
176 goto err;
177 }
178
179 return 0;
180 err:
181 isert_comp_put(comp);
182 return ret;
183 }
184
185 static int
186 isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
187 {
188 struct isert_device *device = isert_conn->device;
189 struct ib_device *ib_dev = device->ib_device;
190 struct iser_rx_desc *rx_desc;
191 struct ib_sge *rx_sg;
192 u64 dma_addr;
193 int i, j;
194
195 isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
196 sizeof(struct iser_rx_desc), GFP_KERNEL);
197 if (!isert_conn->rx_descs)
198 goto fail;
199
200 rx_desc = isert_conn->rx_descs;
201
202 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
203 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
204 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
205 if (ib_dma_mapping_error(ib_dev, dma_addr))
206 goto dma_map_fail;
207
208 rx_desc->dma_addr = dma_addr;
209
210 rx_sg = &rx_desc->rx_sg;
211 rx_sg->addr = rx_desc->dma_addr;
212 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
213 rx_sg->lkey = device->pd->local_dma_lkey;
214 rx_desc->rx_cqe.done = isert_recv_done;
215 }
216
217 return 0;
218
219 dma_map_fail:
220 rx_desc = isert_conn->rx_descs;
221 for (j = 0; j < i; j++, rx_desc++) {
222 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
223 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
224 }
225 kfree(isert_conn->rx_descs);
226 isert_conn->rx_descs = NULL;
227 fail:
228 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
229
230 return -ENOMEM;
231 }
232
233 static void
234 isert_free_rx_descriptors(struct isert_conn *isert_conn)
235 {
236 struct ib_device *ib_dev = isert_conn->device->ib_device;
237 struct iser_rx_desc *rx_desc;
238 int i;
239
240 if (!isert_conn->rx_descs)
241 return;
242
243 rx_desc = isert_conn->rx_descs;
244 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
245 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
246 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
247 }
248
249 kfree(isert_conn->rx_descs);
250 isert_conn->rx_descs = NULL;
251 }
252
253 static void
254 isert_free_comps(struct isert_device *device)
255 {
256 int i;
257
258 for (i = 0; i < device->comps_used; i++) {
259 struct isert_comp *comp = &device->comps[i];
260
261 if (comp->cq)
262 ib_free_cq(comp->cq);
263 }
264 kfree(device->comps);
265 }
266
267 static int
268 isert_alloc_comps(struct isert_device *device)
269 {
270 int i, max_cqe, ret = 0;
271
272 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
273 device->ib_device->num_comp_vectors));
274
275 isert_info("Using %d CQs, %s supports %d vectors support "
276 "pi_capable %d\n",
277 device->comps_used, device->ib_device->name,
278 device->ib_device->num_comp_vectors,
279 device->pi_capable);
280
281 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
282 GFP_KERNEL);
283 if (!device->comps) {
284 isert_err("Unable to allocate completion contexts\n");
285 return -ENOMEM;
286 }
287
288 max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);
289
290 for (i = 0; i < device->comps_used; i++) {
291 struct isert_comp *comp = &device->comps[i];
292
293 comp->device = device;
294 comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i,
295 IB_POLL_WORKQUEUE);
296 if (IS_ERR(comp->cq)) {
297 isert_err("Unable to allocate cq\n");
298 ret = PTR_ERR(comp->cq);
299 comp->cq = NULL;
300 goto out_cq;
301 }
302 }
303
304 return 0;
305 out_cq:
306 isert_free_comps(device);
307 return ret;
308 }
309
310 static int
311 isert_create_device_ib_res(struct isert_device *device)
312 {
313 struct ib_device *ib_dev = device->ib_device;
314 int ret;
315
316 isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge);
317 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
318
319 ret = isert_alloc_comps(device);
320 if (ret)
321 goto out;
322
323 device->pd = ib_alloc_pd(ib_dev);
324 if (IS_ERR(device->pd)) {
325 ret = PTR_ERR(device->pd);
326 isert_err("failed to allocate pd, device %p, ret=%d\n",
327 device, ret);
328 goto out_cq;
329 }
330
331 /* Check signature cap */
332 device->pi_capable = ib_dev->attrs.device_cap_flags &
333 IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
334
335 return 0;
336
337 out_cq:
338 isert_free_comps(device);
339 out:
340 if (ret > 0)
341 ret = -EINVAL;
342 return ret;
343 }
344
345 static void
346 isert_free_device_ib_res(struct isert_device *device)
347 {
348 isert_info("device %p\n", device);
349
350 ib_dealloc_pd(device->pd);
351 isert_free_comps(device);
352 }
353
354 static void
355 isert_device_put(struct isert_device *device)
356 {
357 mutex_lock(&device_list_mutex);
358 device->refcount--;
359 isert_info("device %p refcount %d\n", device, device->refcount);
360 if (!device->refcount) {
361 isert_free_device_ib_res(device);
362 list_del(&device->dev_node);
363 kfree(device);
364 }
365 mutex_unlock(&device_list_mutex);
366 }
367
368 static struct isert_device *
369 isert_device_get(struct rdma_cm_id *cma_id)
370 {
371 struct isert_device *device;
372 int ret;
373
374 mutex_lock(&device_list_mutex);
375 list_for_each_entry(device, &device_list, dev_node) {
376 if (device->ib_device->node_guid == cma_id->device->node_guid) {
377 device->refcount++;
378 isert_info("Found iser device %p refcount %d\n",
379 device, device->refcount);
380 mutex_unlock(&device_list_mutex);
381 return device;
382 }
383 }
384
385 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
386 if (!device) {
387 mutex_unlock(&device_list_mutex);
388 return ERR_PTR(-ENOMEM);
389 }
390
391 INIT_LIST_HEAD(&device->dev_node);
392
393 device->ib_device = cma_id->device;
394 ret = isert_create_device_ib_res(device);
395 if (ret) {
396 kfree(device);
397 mutex_unlock(&device_list_mutex);
398 return ERR_PTR(ret);
399 }
400
401 device->refcount++;
402 list_add_tail(&device->dev_node, &device_list);
403 isert_info("Created a new iser device %p refcount %d\n",
404 device, device->refcount);
405 mutex_unlock(&device_list_mutex);
406
407 return device;
408 }
409
410 static void
411 isert_init_conn(struct isert_conn *isert_conn)
412 {
413 isert_conn->state = ISER_CONN_INIT;
414 INIT_LIST_HEAD(&isert_conn->node);
415 init_completion(&isert_conn->login_comp);
416 init_completion(&isert_conn->login_req_comp);
417 kref_init(&isert_conn->kref);
418 mutex_init(&isert_conn->mutex);
419 INIT_WORK(&isert_conn->release_work, isert_release_work);
420 }
421
/*
 * Unmap and free both login buffers of @isert_conn.
 * Caller must guarantee no posted work request still references them.
 */
static void
isert_free_login_buf(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	/* Login response buffer was mapped towards the device (we send it). */
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
	kfree(isert_conn->login_rsp_buf);

	/* Login request buffer was mapped from the device (initiator fills it). */
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISER_RX_PAYLOAD_SIZE,
			    DMA_FROM_DEVICE);
	kfree(isert_conn->login_req_buf);
}
436
/*
 * Allocate and DMA-map the login request (recv) and login response (send)
 * buffers for the login phase. On failure, everything already acquired is
 * released via the goto-cleanup chain below. Returns 0 or negative errno.
 *
 * NOTE(review): the request buffer is allocated as sizeof(*login_req_buf)
 * but mapped with ISER_RX_PAYLOAD_SIZE — this assumes struct iser_rx_desc
 * spans the full RX payload size; confirm against the struct definition.
 */
static int
isert_alloc_login_buf(struct isert_conn *isert_conn,
		      struct ib_device *ib_dev)
{
	int ret;

	isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
					    GFP_KERNEL);
	if (!isert_conn->login_req_buf) {
		isert_err("Unable to allocate isert_conn->login_buf\n");
		return -ENOMEM;
	}

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				    isert_conn->login_req_buf,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		isert_err("login_req_dma mapping error: %d\n", ret);
		/* Zero the handle so teardown paths can tell it is unmapped. */
		isert_conn->login_req_dma = 0;
		goto out_free_login_req_buf;
	}

	isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
	if (!isert_conn->login_rsp_buf) {
		isert_err("Unable to allocate isert_conn->login_rspbuf\n");
		goto out_unmap_login_req_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					isert_conn->login_rsp_buf,
					ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		isert_err("login_rsp_dma mapping error: %d\n", ret);
		isert_conn->login_rsp_dma = 0;
		goto out_free_login_rsp_buf;
	}

	return 0;

	/* Unwind in reverse order of acquisition. */
out_free_login_rsp_buf:
	kfree(isert_conn->login_rsp_buf);
out_unmap_login_req_buf:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
out_free_login_req_buf:
	kfree(isert_conn->login_req_buf);
	return ret;
}
487
/*
 * Derive per-connection negotiation parameters from the RDMA CM connect
 * request (@param) and local HCA capabilities: caps the inbound RDMA READ
 * depth and decides whether SEND-with-invalidate may be used.
 */
static void
isert_set_nego_params(struct isert_conn *isert_conn,
		      struct rdma_conn_param *param)
{
	struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
					    attr->max_qp_init_rd_atom);
	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	if (param->private_data) {
		/* First private-data byte carries iSER capability flags. */
		u8 flags = *(u8 *)param->private_data;

		/*
		 * use remote invalidation if the both initiator
		 * and the HCA support it
		 */
		isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
					(attr->device_cap_flags &
					 IB_DEVICE_MEM_MGT_EXTENSIONS);
		if (isert_conn->snd_w_inv)
			isert_info("Using remote invalidation\n");
	}
}
513
/*
 * RDMA CM CONNECT_REQUEST handler: build a new isert_conn for the incoming
 * connection, allocate login buffers, take a device reference, create the
 * QP, pre-post the login receive and accept the connection. On any failure
 * the partially built connection is torn down and the request rejected.
 */
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	int ret = 0;

	/* Refuse connects while the portal is administratively disabled. */
	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	isert_dbg("cma_id: %p, portal: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn)
		return -ENOMEM;

	isert_init_conn(isert_conn);
	isert_conn->cm_id = cma_id;

	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
	if (ret)
		goto out;

	device = isert_device_get(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}
	isert_conn->device = device;

	isert_set_nego_params(isert_conn, &event->param.conn);

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	/* Pre-post a receive for the first login PDU before accepting. */
	ret = isert_login_post_recv(isert_conn);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	/* Track the connection until iscsit accepts or the CM tears it down. */
	mutex_lock(&isert_np->mutex);
	list_add_tail(&isert_conn->node, &isert_np->accepted);
	mutex_unlock(&isert_np->mutex);

	return 0;

	/* Unwind in reverse order of acquisition, then reject the connect. */
out_conn_dev:
	isert_device_put(device);
out_rsp_dma_map:
	isert_free_login_buf(isert_conn);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}
581
/*
 * Final release of an isert_conn, invoked from the last kref_put.
 * Frees RX descriptors, destroys the CM id and QP, releases any login
 * buffers still held, drops the device reference and frees the struct.
 */
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;

	isert_dbg("conn %p\n", isert_conn);

	BUG_ON(!device);

	isert_free_rx_descriptors(isert_conn);
	if (isert_conn->cm_id)
		rdma_destroy_id(isert_conn->cm_id);

	if (isert_conn->qp) {
		/* Return the completion context before destroying the QP. */
		struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;

		isert_comp_put(comp);
		ib_destroy_qp(isert_conn->qp);
	}

	/* login_req_buf is non-NULL only if the login buffers still exist. */
	if (isert_conn->login_req_buf)
		isert_free_login_buf(isert_conn);

	isert_device_put(device);

	kfree(isert_conn);
}
609
/*
 * RDMA CM ESTABLISHED handler: mark the connection UP, take an extra
 * reference for the established state, move it to the portal's pending
 * list and wake up the iscsit accept thread waiting on the semaphore.
 */
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;
	struct isert_np *isert_np = cma_id->context;

	isert_info("conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_UP;
	/* Extra ref held while the connection is live; dropped on teardown. */
	kref_get(&isert_conn->kref);
	mutex_unlock(&isert_conn->mutex);

	mutex_lock(&isert_np->mutex);
	list_move_tail(&isert_conn->node, &isert_np->pending);
	mutex_unlock(&isert_np->mutex);

	isert_info("np %p: Allow accept_np to continue\n", isert_np);
	up(&isert_np->sem);
}
630
/*
 * kref release callback: runs when the last reference to the
 * connection is dropped and performs the actual teardown.
 */
static void
isert_release_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, kref);

	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
		   current->pid);

	isert_connect_release(isert_conn);
}
642
/* Drop one reference; frees the connection when it was the last one. */
static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->kref, isert_release_kref);
}
648
/*
 * Handle disconnect of a connection that iscsit never bound: if it is
 * still on the portal's list, remove it, drop the established-state
 * reference and schedule the deferred release work.
 */
static void
isert_handle_unbound_conn(struct isert_conn *isert_conn)
{
	struct isert_np *isert_np = isert_conn->cm_id->context;

	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_conn->node)) {
		/*
		 * This means iscsi doesn't know this connection
		 * so schedule a cleanup ourselves
		 */
		list_del_init(&isert_conn->node);
		isert_put_conn(isert_conn);
		queue_work(isert_release_wq, &isert_conn->release_work);
	}
	mutex_unlock(&isert_np->mutex);
}
666
/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is BOUND, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	/* Already terminating (or further along): nothing to do. */
	if (isert_conn->state >= ISER_CONN_TERMINATING)
		return;

	isert_info("Terminating conn %p state %d\n",
		   isert_conn, isert_conn->state);
	isert_conn->state = ISER_CONN_TERMINATING;
	err = rdma_disconnect(isert_conn->cm_id);
	if (err)
		isert_warn("Failed rdma_disconnect isert_conn %p\n",
			   isert_conn);
}
695
/*
 * CM events targeting the listening id itself (the portal, not a
 * per-connection id). DEVICE_REMOVAL clears the cm_id; ADDR_CHANGE
 * re-creates the listener. Always returns -1 so the CM destroys the
 * id that delivered the event.
 */
static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	isert_dbg("%s (%d): isert np %p\n",
		  rdma_event_msg(event), event, isert_np);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		/* Rebind the listener on the new address. */
		isert_np->cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->cm_id)) {
			isert_err("isert np %p setup id failed: %ld\n",
				  isert_np, PTR_ERR(isert_np->cm_id));
			isert_np->cm_id = NULL;
		}
		break;
	default:
		isert_err("isert np %p Unexpected event %d\n",
			  isert_np, event);
	}

	return -1;
}
722
/*
 * Per-connection disconnect-class CM events (DISCONNECTED, TIMEWAIT_EXIT,
 * ADDR_CHANGE, DEVICE_REMOVAL). Reaction depends on connection state:
 * UP (never bound by iscsit) connections are drained and cleaned up here;
 * BOUND/FULL_FEATURE ones are handed to iscsit for reinstatement.
 */
static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->mutex);
	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		/* Teardown already in progress. */
		break;
	case ISER_CONN_UP:
		isert_conn_terminate(isert_conn);
		ib_drain_qp(isert_conn->qp);
		isert_handle_unbound_conn(isert_conn);
		break;
	case ISER_CONN_BOUND:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		break;
	default:
		/* NOTE(review): "teminating" typo is in the runtime string. */
		isert_warn("conn %p teminating in state %d\n",
			   isert_conn, isert_conn->state);
	}
	mutex_unlock(&isert_conn->mutex);

	return 0;
}
750
/*
 * Connect-failure CM events (REJECTED/UNREACHABLE/CONNECT_ERROR):
 * detach the connection from any portal list, forget the cm_id (the
 * CM will destroy it because we return -1) and drop our reference.
 */
static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	list_del_init(&isert_conn->node);
	/* CM destroys cma_id on the -1 return; don't destroy it twice. */
	isert_conn->cm_id = NULL;
	isert_put_conn(isert_conn);

	return -1;
}
762
/*
 * Top-level RDMA CM event dispatcher. Events on the listening id go to
 * isert_np_cma_handler(); per-connection events are routed by type to
 * the connect/established/disconnect/error handlers above.
 */
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	int ret = 0;

	isert_info("%s (%d): status %d id %p np %p\n",
		   rdma_event_msg(event->event), event->event,
		   event->status, cma_id, cma_id->context);

	/* Events on the listener id itself take a separate path. */
	if (isert_np->cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event->event);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			isert_err("failed handle connect request %d\n", ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}
803
804 static int
805 isert_post_recvm(struct isert_conn *isert_conn, u32 count)
806 {
807 struct ib_recv_wr *rx_wr, *rx_wr_failed;
808 int i, ret;
809 struct iser_rx_desc *rx_desc;
810
811 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
812 rx_desc = &isert_conn->rx_descs[i];
813
814 rx_wr->wr_cqe = &rx_desc->rx_cqe;
815 rx_wr->sg_list = &rx_desc->rx_sg;
816 rx_wr->num_sge = 1;
817 rx_wr->next = rx_wr + 1;
818 }
819 rx_wr--;
820 rx_wr->next = NULL; /* mark end of work requests list */
821
822 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
823 &rx_wr_failed);
824 if (ret)
825 isert_err("ib_post_recv() failed with ret: %d\n", ret);
826
827 return ret;
828 }
829
830 static int
831 isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
832 {
833 struct ib_recv_wr *rx_wr_failed, rx_wr;
834 int ret;
835
836 rx_wr.wr_cqe = &rx_desc->rx_cqe;
837 rx_wr.sg_list = &rx_desc->rx_sg;
838 rx_wr.num_sge = 1;
839 rx_wr.next = NULL;
840
841 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
842 if (ret)
843 isert_err("ib_post_recv() failed with ret: %d\n", ret);
844
845 return ret;
846 }
847
/*
 * Send a login-phase TX descriptor: flush the header region to the
 * device, set the login-send completion handler and post a signaled
 * SEND. Returns 0 or the ib_post_send() error.
 */
static int
isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	/* CPU wrote the headers; hand the region back to the device. */
	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	tx_desc->tx_cqe.done = isert_login_send_done;

	send_wr.next = NULL;
	send_wr.wr_cqe = &tx_desc->tx_cqe;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
	if (ret)
		isert_err("ib_post_send() failed, ret: %d\n", ret);

	return ret;
}
873
874 static void
875 isert_create_send_desc(struct isert_conn *isert_conn,
876 struct isert_cmd *isert_cmd,
877 struct iser_tx_desc *tx_desc)
878 {
879 struct isert_device *device = isert_conn->device;
880 struct ib_device *ib_dev = device->ib_device;
881
882 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
883 ISER_HEADERS_LEN, DMA_TO_DEVICE);
884
885 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
886 tx_desc->iser_header.flags = ISCSI_CTRL;
887
888 tx_desc->num_sge = 1;
889
890 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
891 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
892 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
893 }
894 }
895
/*
 * DMA-map the header region of @tx_desc and populate its first SGE
 * (address/length/lkey). Returns 0 or -ENOMEM on mapping failure.
 */
static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		isert_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	/* SGE 0 always covers the iSER + iSCSI headers. */
	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;

	isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
		  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
		  tx_desc->tx_sg[0].lkey);

	return 0;
}
922
923 static void
924 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
925 struct ib_send_wr *send_wr)
926 {
927 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
928
929 tx_desc->tx_cqe.done = isert_send_done;
930 send_wr->wr_cqe = &tx_desc->tx_cqe;
931
932 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
933 send_wr->opcode = IB_WR_SEND_WITH_INV;
934 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
935 } else {
936 send_wr->opcode = IB_WR_SEND;
937 }
938
939 send_wr->sg_list = &tx_desc->tx_sg[0];
940 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
941 send_wr->send_flags = IB_SEND_SIGNALED;
942 }
943
/*
 * Post a receive work request for the next login request PDU into the
 * dedicated login request buffer. Returns 0 or the ib_post_recv() error.
 */
static int
isert_login_post_recv(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_PAYLOAD_SIZE;
	sge.lkey = isert_conn->device->pd->local_dma_lkey;

	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	/* Completion is routed to the login-specific handler. */
	isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
	if (ret)
		isert_err("ib_post_recv() failed: %d\n", ret);

	return ret;
}
972
/*
 * iscsit callback: transmit a login response PDU of @length payload
 * bytes. On the final (login_complete) response the connection is moved
 * to FULL_FEATURE and the full set of RX descriptors is allocated and
 * posted; otherwise another login receive is posted first. Returns 0
 * or a negative error.
 */
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		/* Attach the login payload as a second SGE. */
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			/* Last login exchange: switch to full-feature RX. */
			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recvm(isert_conn,
					       ISERT_QP_MAX_RECV_DTOS);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->mutex);
			goto post_send;
		}

		/* More login PDUs expected: re-arm the login receive. */
		ret = isert_login_post_recv(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_login_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}
1035
/*
 * Process a received login request PDU out of the login request buffer:
 * on the first request, seed the iscsi_login state from the PDU header,
 * then copy header and key/value payload into the login structures.
 * The first request completes login_comp (login thread is waiting);
 * later ones kick the iscsit login work.
 */
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min	= login_req->min_version;
		login->version_max	= login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
		login->init_task_tag	= login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid		= be16_to_cpu(login_req->cid);
		login->tsih		= be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	/* Payload is capped at the negotiable key/value buffer size. */
	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d "
		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
		  MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}
1084
1085 static struct iscsi_cmd
1086 *isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
1087 {
1088 struct isert_conn *isert_conn = conn->context;
1089 struct isert_cmd *isert_cmd;
1090 struct iscsi_cmd *cmd;
1091
1092 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
1093 if (!cmd) {
1094 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
1095 return NULL;
1096 }
1097 isert_cmd = iscsit_priv_cmd(cmd);
1098 isert_cmd->conn = isert_conn;
1099 isert_cmd->iscsi_cmd = cmd;
1100 isert_cmd->rx_desc = rx_desc;
1101
1102 return cmd;
1103 }
1104
/*
 * Handle a received SCSI command PDU: run iscsit setup/processing, then
 * copy any immediate data from the RX descriptor into the command's
 * scatterlist (or map the descriptor directly when it carries the whole
 * transfer), and finally sequence the command. Returns 0 or the
 * iscsit_setup_scsi_cmd() error.
 */
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;
	unsigned int data_len;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;
	data_len = cmd->se_cmd.data_length;

	/* Entire transfer arrived as immediate data: avoid SG allocation. */
	if (imm_data && imm_data_len == data_len)
		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		/* Command rejected by iscsit: payload must be discarded. */
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	if (imm_data_len != data_len) {
		/* Partial immediate data: copy into the allocated SG list. */
		sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
		sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
				    &rx_desc->data[0], imm_data_len);
		isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
			  sg_nents, imm_data_len);
	} else {
		/* Point the command straight at the RX descriptor's data. */
		sg_init_table(&isert_cmd->sg, 1);
		cmd->se_cmd.t_data_sg = &isert_cmd->sg;
		cmd->se_cmd.t_data_nents = 1;
		sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
		isert_dbg("Transfer Immediate imm_data_len: %d\n",
			  imm_data_len);
	}

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(&cmd->se_cmd);

	return 0;
}
1172
/*
 * Handle a received (unsolicited) DataOut PDU: locate the owning command,
 * copy the payload into the command's scatterlist at the current write
 * offset and let iscsit account the data. The RX descriptor is re-posted
 * here because several DataOuts for the same command may still arrive.
 * Returns 0 or a negative error.
 */
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		/* iscsit decided the PDU needs no further handling. */
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
		  "write_data_done: %u, data_length: %u\n",
		  unsol_data_len,  cmd->write_data_done,
		  cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		isert_err("unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
		  sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	/*
	 * multiple data-outs on the same command can arrive -
	 * so post the buffer before hand
	 */
	rc = isert_post_recv(isert_conn, rx_desc);
	if (rc) {
		isert_err("ib_post_recv failed with %d\n", rc);
		return rc;
	}
	return 0;
}
1237
1238 static int
1239 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1240 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1241 unsigned char *buf)
1242 {
1243 struct iscsi_conn *conn = isert_conn->conn;
1244 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1245 int rc;
1246
1247 rc = iscsit_setup_nop_out(conn, cmd, hdr);
1248 if (rc < 0)
1249 return rc;
1250 /*
1251 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1252 */
1253
1254 return iscsit_process_nop_out(conn, cmd, hdr);
1255 }
1256
1257 static int
1258 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1259 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1260 struct iscsi_text *hdr)
1261 {
1262 struct iscsi_conn *conn = isert_conn->conn;
1263 u32 payload_length = ntoh24(hdr->dlength);
1264 int rc;
1265 unsigned char *text_in = NULL;
1266
1267 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1268 if (rc < 0)
1269 return rc;
1270
1271 if (payload_length) {
1272 text_in = kzalloc(payload_length, GFP_KERNEL);
1273 if (!text_in) {
1274 isert_err("Unable to allocate text_in of payload_length: %u\n",
1275 payload_length);
1276 return -ENOMEM;
1277 }
1278 }
1279 cmd->text_in_ptr = text_in;
1280
1281 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1282
1283 return iscsit_process_text_cmd(conn, cmd, hdr);
1284 }
1285
/*
 * Dispatch a received iSCSI PDU by opcode. Read/write stags and virtual
 * addresses extracted from the iSER header are stored in the per-command
 * private area for later RDMA operations. Returns 0/positive on handled,
 * negative on error (default -EINVAL for unallocatable commands).
 */
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	/*
	 * Discovery sessions may only carry TEXT and LOGOUT.
	 * NOTE(review): this uses bitwise AND on opcode values, not
	 * equality — e.g. opcode 0x05 passes both bit tests. Verify the
	 * intended filter against the iSCSI opcode definitions.
	 */
	if (conn->sess->sess_ops->SessionType &&
	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
			  " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;
		/* Remember which rkey to invalidate on the response SEND. */
		isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		/* A non-reserved TTT continues an earlier TEXT exchange. */
		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
		else
			cmd = isert_allocate_cmd(conn, rx_desc);

		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}
1370
1371 static void
1372 isert_print_wc(struct ib_wc *wc, const char *type)
1373 {
1374 if (wc->status != IB_WC_WR_FLUSH_ERR)
1375 isert_err("%s failure: %s (%d) vend_err %x\n", type,
1376 ib_wc_status_msg(wc->status), wc->status,
1377 wc->vendor_err);
1378 else
1379 isert_dbg("%s failure: %s (%d)\n", type,
1380 ib_wc_status_msg(wc->status), wc->status);
1381 }
1382
/*
 * Receive completion handler for full-feature phase: sync the RX
 * descriptor for CPU access, decode the iSER header (extracting remote
 * stags/VAs when advertised) and dispatch the contained iSCSI PDU.
 * Non-flush errors trigger connection reinstatement.
 */
static void
isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "recv");
		/* Flush completions are expected teardown noise. */
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		return;
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);

	isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
		 (int)(wc->byte_len - ISER_HEADERS_LEN));

	switch (iser_ctrl->flags & 0xF0) {
	case ISCSI_CTRL:
		/* RSV/WSV flags advertise remote buffers for RDMA. */
		if (iser_ctrl->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_ctrl->read_stag);
			read_va = be64_to_cpu(iser_ctrl->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
				  read_stag, (unsigned long long)read_va);
		}
		if (iser_ctrl->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_ctrl->write_stag);
			write_va = be64_to_cpu(iser_ctrl->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
				  write_stag, (unsigned long long)write_va);
		}

		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		isert_err("iSER Hello message\n");
		break;
	default:
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
		break;
	}

	isert_rx_opcode(isert_conn, rx_desc,
			read_stag, read_va, write_stag, write_va);

	/* Give the buffer back to the device for the next receive. */
	ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
1439
/*
 * Receive completion handler for the login phase: record the received
 * payload length, process the login request in-line when the login
 * thread is past its first request, and complete login_req_comp so a
 * waiting login thread can proceed.
 */
static void
isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "login recv");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);

	/* byte_len covers the iSER headers plus the login payload. */
	isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;

	if (isert_conn->conn) {
		struct iscsi_login *login = isert_conn->conn->conn_login;

		/* First request is consumed by the login thread instead. */
		if (login && !login->first_request)
			isert_rx_login_req(isert_conn);
	}

	mutex_lock(&isert_conn->mutex);
	complete(&isert_conn->login_req_comp);
	mutex_unlock(&isert_conn->mutex);

	ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
1470
/*
 * Tear down the command's rdma_rw context, if one was set up.
 * Chooses the signature-aware destroy when the command carried
 * protection information. Resets nr_ops so a second call is a no-op.
 */
static void
isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
{
	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);

	/* No RDMA context was ever created for this command. */
	if (!cmd->rw.nr_ops)
		return;

	if (isert_prot_cmd(conn, se_cmd)) {
		rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
				conn->cm_id->port_num, se_cmd->t_data_sg,
				se_cmd->t_data_nents, se_cmd->t_prot_sg,
				se_cmd->t_prot_nents, dir);
	} else {
		rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
	}

	cmd->rw.nr_ops = 0;
}
1492
/*
 * Release a command after its final completion (or on error when
 * @comp_err is true). The cleanup path depends on the iSCSI opcode:
 * SCSI commands tear down timers and the RDMA context before handing
 * the se_cmd back to the target core; other opcodes either free via
 * the core or fall back to iscsit_release_cmd().
 */
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd);
			}
		}

		isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
			break;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}
1569
1570 static void
1571 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1572 {
1573 if (tx_desc->dma_addr != 0) {
1574 isert_dbg("unmap single for tx_desc->dma_addr\n");
1575 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1576 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1577 tx_desc->dma_addr = 0;
1578 }
1579 }
1580
1581 static void
1582 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1583 struct ib_device *ib_dev, bool comp_err)
1584 {
1585 if (isert_cmd->pdu_buf_dma != 0) {
1586 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
1587 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1588 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1589 isert_cmd->pdu_buf_dma = 0;
1590 }
1591
1592 isert_unmap_tx_desc(tx_desc, ib_dev);
1593 isert_put_cmd(isert_cmd, comp_err);
1594 }
1595
/*
 * Query the T10-PI signature status of @sig_mr after an RDMA operation.
 *
 * Returns 0 when no PI error was detected, 1 when a guard/ref-tag/app-tag
 * error was found (se_cmd->pi_err and se_cmd->bad_sector are filled in),
 * or a negative value if ib_check_mr_status() itself failed.
 */
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		/* logical block plus the 8-byte DIF tuple appended to it */
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		/* do_div() divides in place: byte offset -> sector index */
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		isert_err("PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->bad_sector,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}
1639
/*
 * Completion handler for the RDMA WRITE (Data-In) path.  This cqe is
 * only installed by isert_put_datain() for PI-offload commands, so on
 * success the signature MR status is always checked before sending the
 * SCSI response (or a check condition if a PI error was detected).
 */
static void
isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma write");
		/* flush errors are expected during teardown; don't reinstate */
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);

	if (ret)
		transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
	else
		isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
}
1668
/*
 * Completion handler for the RDMA READ (Data-Out) path.  On success the
 * fully received WRITE payload is handed to target core for execution;
 * a PI failure instead drops the extra se_cmd reference and returns a
 * check condition to the initiator.
 */
static void
isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma read");
		/* flush errors are expected during teardown; don't reinstate */
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	iscsit_stop_dataout_timer(cmd);

	/* PI status only exists when a signature context was initialized */
	if (isert_prot_cmd(isert_conn, se_cmd))
		ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
	cmd->write_data_done = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret) {
		/* drop the reference RDMA READ submission took on se_cmd */
		target_put_sess_cmd(se_cmd);
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	} else {
		target_execute_cmd(se_cmd);
	}
}
1711
/*
 * Deferred (workqueue) completion for control PDUs whose post-handlers
 * may sleep, queued from isert_send_done().
 */
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
		/* FALLTHRU: TMR response is released like reject/text */
	case ISTATE_SEND_REJECT: /* FALLTHRU */
	case ISTATE_SEND_TEXTRSP: /* FALLTHRU */
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		/* logout post-handler owns the remaining teardown */
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}
1741
1742 static void
1743 isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
1744 {
1745 struct isert_conn *isert_conn = wc->qp->qp_context;
1746 struct ib_device *ib_dev = isert_conn->cm_id->device;
1747 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
1748
1749 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1750 isert_print_wc(wc, "login send");
1751 if (wc->status != IB_WC_WR_FLUSH_ERR)
1752 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1753 }
1754
1755 isert_unmap_tx_desc(tx_desc, ib_dev);
1756 }
1757
/*
 * Generic send completion.  Control responses whose post-handlers may
 * sleep are deferred to isert_comp_wq; everything else is completed and
 * released inline.
 */
static void
isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "send");
		/* flush errors are expected during teardown; don't reinstate */
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (isert_cmd->iscsi_cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
	case ISTATE_SEND_LOGOUTRSP:
	case ISTATE_SEND_REJECT:
	case ISTATE_SEND_TEXTRSP:
		isert_unmap_tx_desc(tx_desc, ib_dev);

		/* post-handlers may sleep: run them from workqueue context */
		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	default:
		isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
		break;
	}
}
1792
1793 static int
1794 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1795 {
1796 struct ib_send_wr *wr_failed;
1797 int ret;
1798
1799 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
1800 if (ret) {
1801 isert_err("ib_post_recv failed with %d\n", ret);
1802 return ret;
1803 }
1804
1805 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
1806 &wr_failed);
1807 if (ret) {
1808 isert_err("ib_post_send failed with %d\n", ret);
1809 return ret;
1810 }
1811 return ret;
1812 }
1813
/*
 * Build and post a SCSI Response PDU for @cmd, attaching sense data as
 * a second SGE when target core flagged any.
 */
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		/* sense payload is prefixed by its big-endian length */
		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		/* -(len) & 3 rounds the payload up to a 4-byte boundary */
		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		/* unmapped later by isert_completion_put() */
		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= pdu_len;
		tx_dsg->lkey	= device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting SCSI Response\n");

	return isert_post_response(isert_conn, isert_cmd);
}
1862
/*
 * Target-core aborted-task callback: detach @cmd from the connection,
 * stop any pending Data-Out timer and release its rdma_rw context.
 */
static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
}
1878
1879 static enum target_prot_op
1880 isert_get_sup_prot_ops(struct iscsi_conn *conn)
1881 {
1882 struct isert_conn *isert_conn = conn->context;
1883 struct isert_device *device = isert_conn->device;
1884
1885 if (conn->tpg->tpg_attrib.t10_pi) {
1886 if (device->pi_capable) {
1887 isert_info("conn %p PI offload enabled\n", isert_conn);
1888 isert_conn->pi_support = true;
1889 return TARGET_PROT_ALL;
1890 }
1891 }
1892
1893 isert_info("conn %p PI offload disabled\n", isert_conn);
1894 isert_conn->pi_support = false;
1895
1896 return TARGET_PROT_NORMAL;
1897 }
1898
/*
 * Build and post a NOPIN PDU.  @nopout_response distinguishes a reply
 * to an initiator NOPOUT from a target-initiated ping.
 */
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
1918
/* Build and post a Logout Response PDU for @cmd. */
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Logout Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
1936
/* Build and post a Task Management Response PDU for @cmd. */
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
1954
/*
 * Build and post a Reject PDU.  The offending PDU header (cmd->buf_ptr,
 * ISCSI_HDR_LEN bytes) is attached as the data segment in a second SGE.
 */
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	/* unmapped later by isert_completion_put() */
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr	= isert_cmd->pdu_buf_dma;
	tx_dsg->length	= ISCSI_HDR_LEN;
	tx_dsg->lkey	= device->pd->local_dma_lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
1987
/*
 * Build and post a Text Response PDU.  Any text payload built by
 * iscsit_build_text_rsp() (length = its return value) is attached as a
 * second SGE.  Returns a negative value if PDU construction fails.
 */
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		/* unmapped later by isert_completion_put() */
		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= txt_rsp_len;
		tx_dsg->lkey	= device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
2028
2029 static inline void
2030 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
2031 struct ib_sig_domain *domain)
2032 {
2033 domain->sig_type = IB_SIG_TYPE_T10_DIF;
2034 domain->sig.dif.bg_type = IB_T10DIF_CRC;
2035 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
2036 domain->sig.dif.ref_tag = se_cmd->reftag_seed;
2037 /*
2038 * At the moment we hard code those, but if in the future
2039 * the target core would like to use it, we will take it
2040 * from se_cmd.
2041 */
2042 domain->sig.dif.apptag_check_mask = 0xffff;
2043 domain->sig.dif.app_escape = true;
2044 domain->sig.dif.ref_escape = true;
2045 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
2046 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
2047 domain->sig.dif.ref_remap = true;
2048 };
2049
2050 static int
2051 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2052 {
2053 memset(sig_attrs, 0, sizeof(*sig_attrs));
2054
2055 switch (se_cmd->prot_op) {
2056 case TARGET_PROT_DIN_INSERT:
2057 case TARGET_PROT_DOUT_STRIP:
2058 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
2059 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2060 break;
2061 case TARGET_PROT_DOUT_INSERT:
2062 case TARGET_PROT_DIN_STRIP:
2063 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
2064 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2065 break;
2066 case TARGET_PROT_DIN_PASS:
2067 case TARGET_PROT_DOUT_PASS:
2068 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2069 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2070 break;
2071 default:
2072 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
2073 return -EINVAL;
2074 }
2075
2076 sig_attrs->check_mask =
2077 (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2078 (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
2079 (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
2080 return 0;
2081 }
2082
/*
 * Initialize an rdma_rw context for @cmd (with a signature handover for
 * PI commands) and post the resulting RDMA READ/WRITE work requests.
 *
 * @cqe:      completion entry signalling the last WR; may be NULL when
 *            completion is carried by @chain_wr instead.
 * @chain_wr: send WR chained after the RDMA operations, or NULL.
 *
 * Returns a negative value on failure, non-negative on success
 * (propagated from rdma_rw_ctx_post()).
 */
static int
isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
		       struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
	u8 port_num = conn->cm_id->port_num;
	u64 addr;
	u32 rkey, offset;
	int ret;

	/* pick the initiator-side buffer matching the transfer direction */
	if (dir == DMA_FROM_DEVICE) {
		addr = cmd->write_va;
		rkey = cmd->write_stag;
		offset = cmd->iscsi_cmd->write_data_done;
	} else {
		addr = cmd->read_va;
		rkey = cmd->read_stag;
		offset = 0;
	}

	if (isert_prot_cmd(conn, se_cmd)) {
		struct ib_sig_attrs sig_attrs;

		ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
		if (ret)
			return ret;

		/* signature contexts do not support partial offsets */
		WARN_ON_ONCE(offset);
		ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				se_cmd->t_prot_sg, se_cmd->t_prot_nents,
				&sig_attrs, addr, rkey, dir);
	} else {
		ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				offset, addr, rkey, dir);
	}
	if (ret < 0) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
		return ret;
	}

	ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
	if (ret < 0)
		isert_err("Cmd: %p failed to post RDMA res\n", cmd);
	return ret;
}
2131
/*
 * Transfer Data-In to the initiator via RDMA WRITE.  For PI commands
 * the completion is taken on the RDMA WRITE itself (isert_rdma_write_done);
 * otherwise the SCSI response PDU is chained after the RDMA WRITEs.
 */
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_cqe *cqe = NULL;
	struct ib_send_wr *chain_wr = NULL;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
		cqe = &isert_cmd->tx_desc.tx_cqe;
	} else {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);

		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
		if (rc) {
			isert_err("ib_post_recv failed with %d\n", rc);
			return rc;
		}

		chain_wr = &isert_cmd->tx_desc.send_wr;
	}

	/*
	 * NOTE(review): the return value of isert_rdma_rw_ctx_post() is
	 * ignored and 1 is returned unconditionally — confirm callers
	 * handle posting failure elsewhere.
	 */
	isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
	isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
	return 1;
}
2173
/*
 * Fetch Data-Out from the initiator via RDMA READ; completion runs
 * isert_rdma_read_done().  @recovery is part of the iscsit transport
 * interface but is not consulted here.
 */
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);

	isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
	/* NOTE(review): return value of ctx_post ignored, as in put_datain */
	isert_rdma_rw_ctx_post(isert_cmd, conn->context,
			       &isert_cmd->tx_desc.tx_cqe, NULL);

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		  isert_cmd);
	return 0;
}
2190
2191 static int
2192 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2193 {
2194 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2195 int ret = 0;
2196
2197 switch (state) {
2198 case ISTATE_REMOVE:
2199 spin_lock_bh(&conn->cmd_lock);
2200 list_del_init(&cmd->i_conn_node);
2201 spin_unlock_bh(&conn->cmd_lock);
2202 isert_put_cmd(isert_cmd, true);
2203 break;
2204 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2205 ret = isert_put_nopin(cmd, conn, false);
2206 break;
2207 default:
2208 isert_err("Unknown immediate state: 0x%02x\n", state);
2209 ret = -EINVAL;
2210 break;
2211 }
2212
2213 return ret;
2214 }
2215
2216 static int
2217 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2218 {
2219 struct isert_conn *isert_conn = conn->context;
2220 int ret;
2221
2222 switch (state) {
2223 case ISTATE_SEND_LOGOUTRSP:
2224 ret = isert_put_logout_rsp(cmd, conn);
2225 if (!ret)
2226 isert_conn->logout_posted = true;
2227 break;
2228 case ISTATE_SEND_NOPIN:
2229 ret = isert_put_nopin(cmd, conn, true);
2230 break;
2231 case ISTATE_SEND_TASKMGTRSP:
2232 ret = isert_put_tm_rsp(cmd, conn);
2233 break;
2234 case ISTATE_SEND_REJECT:
2235 ret = isert_put_reject(cmd, conn);
2236 break;
2237 case ISTATE_SEND_TEXTRSP:
2238 ret = isert_put_text_rsp(cmd, conn);
2239 break;
2240 case ISTATE_SEND_STATUS:
2241 /*
2242 * Special case for sending non GOOD SCSI status from TX thread
2243 * context during pre se_cmd excecution failure.
2244 */
2245 ret = isert_put_response(conn, cmd);
2246 break;
2247 default:
2248 isert_err("Unknown response state: 0x%02x\n", state);
2249 ret = -EINVAL;
2250 break;
2251 }
2252
2253 return ret;
2254 }
2255
/*
 * Create, bind and start listening on an RDMA CM ID for the network
 * portal described by @isert_np.  Returns the listening ID, or an
 * ERR_PTR() on failure (the partially created ID is destroyed).
 */
struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	/* backlog 0: the CM core picks its default queue depth */
	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}
2294
/*
 * Allocate and initialize the iSER network-portal context for @np and
 * start the RDMA CM listener on @ksockaddr.  Returns 0 on success or a
 * negative errno (the context is freed on failure).
 */
static int
isert_setup_np(struct iscsi_np *np,
	       struct sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		isert_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	/* sem counts connections queued for isert_accept_np() */
	sema_init(&isert_np->sem, 0);
	mutex_init(&isert_np->mutex);
	INIT_LIST_HEAD(&isert_np->accepted);
	INIT_LIST_HEAD(&isert_np->pending);
	isert_np->np = np;

	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}
2337
2338 static int
2339 isert_rdma_accept(struct isert_conn *isert_conn)
2340 {
2341 struct rdma_cm_id *cm_id = isert_conn->cm_id;
2342 struct rdma_conn_param cp;
2343 int ret;
2344 struct iser_cm_hdr rsp_hdr;
2345
2346 memset(&cp, 0, sizeof(struct rdma_conn_param));
2347 cp.initiator_depth = isert_conn->initiator_depth;
2348 cp.retry_count = 7;
2349 cp.rnr_retry_count = 7;
2350
2351 memset(&rsp_hdr, 0, sizeof(rsp_hdr));
2352 rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
2353 if (!isert_conn->snd_w_inv)
2354 rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED;
2355 cp.private_data = (void *)&rsp_hdr;
2356 cp.private_data_len = sizeof(rsp_hdr);
2357
2358 ret = rdma_accept(cm_id, &cp);
2359 if (ret) {
2360 isert_err("rdma_accept() failed with: %d\n", ret);
2361 return ret;
2362 }
2363
2364 return 0;
2365 }
2366
/*
 * Wait for a login request PDU to arrive on the connection.  Only the
 * first request additionally parses the PDU and waits for login_comp;
 * later requests are handled directly from the receive path.  Returns
 * 0 on success or the interrupted-wait error.
 */
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	/* re-arm for the next login request on this connection */
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}
2402
2403 static void
2404 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
2405 struct isert_conn *isert_conn)
2406 {
2407 struct rdma_cm_id *cm_id = isert_conn->cm_id;
2408 struct rdma_route *cm_route = &cm_id->route;
2409
2410 conn->login_family = np->np_sockaddr.ss_family;
2411
2412 conn->login_sockaddr = cm_route->addr.dst_addr;
2413 conn->local_sockaddr = cm_route->addr.src_addr;
2414 }
2415
/*
 * Hand the next pending iSER connection to the iscsit login thread.
 * Sleeps on isert_np->sem until isert_connect_request() queues one;
 * returns -ENODEV when interrupted or when the np thread is resetting.
 */
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	ret = down_interruptible(&isert_np->sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			 np->np_thread_state);
		/**
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 **/
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->mutex);
	if (list_empty(&isert_np->pending)) {
		/* woken without work (e.g. teardown raced us): wait again */
		mutex_unlock(&isert_np->mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->pending,
			struct isert_conn, node);
	list_del_init(&isert_conn->node);
	mutex_unlock(&isert_np->mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	isert_conn->state = ISER_CONN_BOUND;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}
2461
/*
 * Tear down the iSER network portal: stop the CM listener, release any
 * connections still sitting on the pending/accepted lists, and free the
 * portal context.
 */
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;

	if (isert_np->cm_id)
		rdma_destroy_id(isert_np->cm_id);

	/*
	 * FIXME: At this point we don't have a good way to insure
	 * that at this point we don't have hanging connections that
	 * completed RDMA establishment but didn't start iscsi login
	 * process. So work-around this by cleaning up what ever piled
	 * up in accepted and pending lists.
	 */
	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_np->pending)) {
		isert_info("Still have isert pending connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->pending,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}

	if (!list_empty(&isert_np->accepted)) {
		isert_info("Still have isert accepted connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->accepted,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->mutex);

	np->np_context = NULL;
	kfree(isert_np);
}
2505
/*
 * Deferred connection release: mark the connection DOWN under its mutex
 * and drop the final reference.
 */
static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}
2521
2522 static void
2523 isert_wait4logout(struct isert_conn *isert_conn)
2524 {
2525 struct iscsi_conn *conn = isert_conn->conn;
2526
2527 isert_info("conn %p\n", isert_conn);
2528
2529 if (isert_conn->logout_posted) {
2530 isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
2531 wait_for_completion_timeout(&conn->conn_logout_comp,
2532 SECONDS_FOR_LOGOUT_COMP * HZ);
2533 }
2534 }
2535
2536 static void
2537 isert_wait4cmds(struct iscsi_conn *conn)
2538 {
2539 isert_info("iscsi_conn %p\n", conn);
2540
2541 if (conn->sess) {
2542 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
2543 target_wait_for_sess_cmds(conn->sess->se_sess);
2544 }
2545 }
2546
2547 /**
2548 * isert_put_unsol_pending_cmds() - Drop commands waiting for
2549 * unsolicitate dataout
2550 * @conn: iscsi connection
2551 *
2552 * We might still have commands that are waiting for unsolicited
2553 * dataouts messages. We must put the extra reference on those
2554 * before blocking on the target_wait_for_session_cmds
2555 */
2556 static void
2557 isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
2558 {
2559 struct iscsi_cmd *cmd, *tmp;
2560 static LIST_HEAD(drop_cmd_list);
2561
2562 spin_lock_bh(&conn->cmd_lock);
2563 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
2564 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
2565 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
2566 (cmd->write_data_done < cmd->se_cmd.data_length))
2567 list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
2568 }
2569 spin_unlock_bh(&conn->cmd_lock);
2570
2571 list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
2572 list_del_init(&cmd->i_conn_node);
2573 if (cmd->i_state != ISTATE_REMOVE) {
2574 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2575
2576 isert_info("conn %p dropping cmd %p\n", conn, cmd);
2577 isert_put_cmd(isert_cmd, true);
2578 }
2579 }
2580 }
2581
/*
 * isert_wait_conn() - quiesce a connection before its final release.
 *
 * Called by the iSCSI target core during connection shutdown.  The steps
 * below are strictly ordered: terminate the RDMA connection, drain the QP
 * so no further completions can run, then reap commands, and finally hand
 * the last teardown step off to isert_release_wq (isert_release_work).
 */
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	/*
	 * Drain before touching the command lists so no RX/TX completion
	 * races with the cleanup below.
	 */
	ib_drain_qp(isert_conn->qp);
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}
2599
/*
 * isert_free_conn() - drop the caller's reference to the connection.
 * Drains the QP first so no completion handler can still be running
 * when the final put potentially frees the connection.
 */
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	ib_drain_qp(isert_conn->qp);
	isert_put_conn(isert_conn);
}
2607
/*
 * isert_get_rx_pdu() - park the iSCSI RX thread.
 *
 * Blocks forever on a private completion that is never completed;
 * wait_for_completion_interruptible() only returns when the thread is
 * signalled.  NOTE(review): presumably iSER delivers PDUs through RDMA
 * completions rather than this thread, so there is nothing to poll here
 * - confirm against the iscsit_transport contract.
 */
static void isert_get_rx_pdu(struct iscsi_conn *conn)
{
	struct completion comp;

	init_completion(&comp);

	/* Intentionally never completed; returns only on a signal. */
	wait_for_completion_interruptible(&comp);
}
2616
/*
 * iscsit_transport callback table registered with the iSCSI target core
 * in isert_init(); maps the generic transport operations onto the iSER
 * implementations in this file.
 */
static struct iscsit_transport iser_target_transport = {
	.name = "IB/iSER",
	.transport_type = ISCSI_INFINIBAND,
	.rdma_shutdown = true,
	.priv_size = sizeof(struct isert_cmd),
	.owner = THIS_MODULE,
	.iscsit_setup_np = isert_setup_np,
	.iscsit_accept_np = isert_accept_np,
	.iscsit_free_np = isert_free_np,
	.iscsit_wait_conn = isert_wait_conn,
	.iscsit_free_conn = isert_free_conn,
	.iscsit_get_login_rx = isert_get_login_rx,
	.iscsit_put_login_tx = isert_put_login_tx,
	.iscsit_immediate_queue = isert_immediate_queue,
	.iscsit_response_queue = isert_response_queue,
	.iscsit_get_dataout = isert_get_dataout,
	.iscsit_queue_data_in = isert_put_datain,
	.iscsit_queue_status = isert_put_response,
	.iscsit_aborted_task = isert_aborted_task,
	.iscsit_get_rx_pdu = isert_get_rx_pdu,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
2639
2640 static int __init isert_init(void)
2641 {
2642 int ret;
2643
2644 isert_comp_wq = alloc_workqueue("isert_comp_wq",
2645 WQ_UNBOUND | WQ_HIGHPRI, 0);
2646 if (!isert_comp_wq) {
2647 isert_err("Unable to allocate isert_comp_wq\n");
2648 ret = -ENOMEM;
2649 return -ENOMEM;
2650 }
2651
2652 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
2653 WQ_UNBOUND_MAX_ACTIVE);
2654 if (!isert_release_wq) {
2655 isert_err("Unable to allocate isert_release_wq\n");
2656 ret = -ENOMEM;
2657 goto destroy_comp_wq;
2658 }
2659
2660 iscsit_register_transport(&iser_target_transport);
2661 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
2662
2663 return 0;
2664
2665 destroy_comp_wq:
2666 destroy_workqueue(isert_comp_wq);
2667
2668 return ret;
2669 }
2670
/*
 * isert_exit() - module exit point.
 *
 * Flushes any globally scheduled work, tears down both workqueues and
 * unregisters the transport from the iSCSI target core.
 */
static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}
2679
/* Module metadata and init/exit registration. */
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);
2687
2688
2689
2690
2691
2692 /* LDV_COMMENT_BEGIN_MAIN */
#ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful

/*
 * Driver Environment Generator harness (LDV).  Drives the
 * iser_target_transport callbacks in a nondeterministic order between
 * module init and module exit, so the verifier can explore all
 * interleavings.  The duplicated identical #define blocks emitted by the
 * generator were dropped; the macros are already defined earlier in the
 * driver, and the harness never uses them.
 */

/* LDV model hooks, implemented by the verifier. */
void ldv_check_final_state(void);
void ldv_check_return_value(int res);
void ldv_check_return_value_probe(int res);
void ldv_initialize(void);
void ldv_handler_precall(void);
int nondet_int(void);

/* Special LDV state variable. */
int LDV_IN_INTERRUPT;

void ldv_main0_sequence_infinite_withcheck_stateful(void) {

	/* Nondeterministic arguments for the transport callbacks. */
	struct iscsi_np *var_group1;
	struct sockaddr_storage *var_group2;
	struct iscsi_conn *var_group3;
	struct iscsi_login *var_group4;
	struct iscsi_cmd *var_group5;
	u32 var_isert_put_login_tx_36_p2;
	int var_isert_immediate_queue_71_p2;
	int var_isert_response_queue_72_p2;
	bool var_isert_get_dataout_70_p2;

	LDV_IN_INTERRUPT = 1;

	/* Initialize the LDV model. */
	ldv_initialize();

	/* Module load; bail out straight to the final check on failure. */
	ldv_handler_precall();
	if (isert_init())
		goto ldv_final;

	/* Nondeterministically invoke transport callbacks. */
	while (nondet_int()) {
		switch (nondet_int()) {
		case 0:
			ldv_handler_precall();
			isert_setup_np(var_group1, var_group2);
			break;
		case 1:
			ldv_handler_precall();
			isert_accept_np(var_group1, var_group3);
			break;
		case 2:
			ldv_handler_precall();
			isert_free_np(var_group1);
			break;
		case 3:
			ldv_handler_precall();
			isert_wait_conn(var_group3);
			break;
		case 4:
			ldv_handler_precall();
			isert_free_conn(var_group3);
			break;
		case 5:
			ldv_handler_precall();
			isert_get_login_rx(var_group3, var_group4);
			break;
		case 6:
			ldv_handler_precall();
			isert_put_login_tx(var_group3, var_group4,
					   var_isert_put_login_tx_36_p2);
			break;
		case 7:
			ldv_handler_precall();
			isert_immediate_queue(var_group3, var_group5,
					      var_isert_immediate_queue_71_p2);
			break;
		case 8:
			ldv_handler_precall();
			isert_response_queue(var_group3, var_group5,
					     var_isert_response_queue_72_p2);
			break;
		case 9:
			ldv_handler_precall();
			isert_get_dataout(var_group3, var_group5,
					  var_isert_get_dataout_70_p2);
			break;
		case 10:
			ldv_handler_precall();
			isert_put_datain(var_group3, var_group5);
			break;
		case 11:
			ldv_handler_precall();
			isert_put_response(var_group3, var_group5);
			break;
		case 12:
			ldv_handler_precall();
			isert_aborted_task(var_group3, var_group5);
			break;
		case 13:
			ldv_handler_precall();
			isert_get_rx_pdu(var_group3);
			break;
		case 14:
			ldv_handler_precall();
			isert_get_sup_prot_ops(var_group3);
			break;
		default:
			break;
		}
	}

ldv_module_exit:
	/* Module unload. */
	ldv_handler_precall();
	isert_exit();

	/* Verify that all resources/locks were released. */
ldv_final:
	ldv_check_final_state();

	return;
}
#endif

/* LDV_COMMENT_END_MAIN */
3317
3318 #line 10 "/home/ldvuser/ldv/ref_launches/work/current--X--drivers--X--defaultlinux-4.8-rc1.tar.xz--X--331_1a--X--cpachecker/linux-4.8-rc1.tar.xz/csd_deg_dscv/1067/dscv_tempdir/dscv/ri/331_1a/drivers/infiniband/ulp/isert/ib_isert.o.c.prepared" 1
2 #include <verifier/rcv.h>
3 #include <kernel-model/ERR.inc>
4
/* Number of dma_map calls whose result has not yet been checked. */
int LDV_DMA_MAP_CALLS = 0;

/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_map_page') maps page */
void ldv_dma_map_page(void) {
	/* LDV_COMMENT_ASSERT Check that the previous dma_mapping call was checked */
	ldv_assert(LDV_DMA_MAP_CALLS == 0);
	/* LDV_COMMENT_CHANGE_STATE Increase dma_mapping counter */
	LDV_DMA_MAP_CALLS++;
}
14
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_dma_mapping_error') unmaps page */
void ldv_dma_mapping_error(void) {
	/* LDV_COMMENT_ASSERT There must be an unchecked dma_mapping call to verify */
	ldv_assert(LDV_DMA_MAP_CALLS != 0);
	/* LDV_COMMENT_CHANGE_STATE Mark the previous dma_mapping call as checked */
	LDV_DMA_MAP_CALLS--;
}
22
/* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that the DMA-mapping counter is back to its initial value at the end */
void ldv_check_final_state(void) {
	/* LDV_COMMENT_ASSERT Every dma_mapping call must have been checked before module unloading */
	ldv_assert(LDV_DMA_MAP_CALLS == 0);
26 ldv_assert(LDV_DMA_MAP_CALLS == 0);
27 } 1 #ifndef _LDV_RCV_H_
2 #define _LDV_RCV_H_
3
4 /* If expr evaluates to zero, ldv_assert() causes a program to reach the error
5 label like the standard assert(). */
6 #define ldv_assert(expr) ((expr) ? 0 : ldv_error())
7
/* The error label wrapper. It is used because some static verifiers (like
   BLAST) don't accept multiple error labels through a program. */
static inline void ldv_error(void)
{
	/* Deliberate self-loop: reaching this point is the error state. */
	LDV_ERROR: goto LDV_ERROR;
}
14
15 /* If expr evaluates to zero, ldv_assume() causes an infinite loop that is
16 avoided by verifiers. */
17 #define ldv_assume(expr) ((expr) ? 0 : ldv_stop())
18
/* Infinite loop that causes verifiers to prune (skip) such paths. */
static inline void ldv_stop(void) {
	LDV_STOP: goto LDV_STOP;
}
23
24 /* Special nondeterministic functions. */
25 int ldv_undef_int(void);
26 void *ldv_undef_ptr(void);
27 unsigned long ldv_undef_ulong(void);
28 long ldv_undef_long(void);
29 /* Return nondeterministic negative integer number. */
30 static inline int ldv_undef_int_negative(void)
31 {
32 int ret = ldv_undef_int();
33
34 ldv_assume(ret < 0);
35
36 return ret;
37 }
38 /* Return nondeterministic nonpositive integer number. */
39 static inline int ldv_undef_int_nonpositive(void)
40 {
41 int ret = ldv_undef_int();
42
43 ldv_assume(ret <= 0);
44
45 return ret;
46 }
47
48 /* Add explicit model for __builin_expect GCC function. Without the model a
49 return value will be treated as nondetermined by verifiers. */
50 static inline long __builtin_expect(long exp, long c)
51 {
52 return exp;
53 }
54
55 /* This function causes the program to exit abnormally. GCC implements this
56 function by using a target-dependent mechanism (such as intentionally executing
57 an illegal instruction) or by calling abort. The mechanism used may vary from
58 release to release so you should not rely on any particular implementation.
59 http://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html */
60 static inline void __builtin_trap(void)
61 {
62 ldv_assert(0);
63 }
64
65 /* The constant is for simulating an error of ldv_undef_ptr() function. */
66 #define LDV_PTR_MAX 2012
67
68 #endif /* _LDV_RCV_H_ */ 1 #ifndef __LINUX_BITMAP_H
2 #define __LINUX_BITMAP_H
3
4 #ifndef __ASSEMBLY__
5
6 #include <linux/types.h>
7 #include <linux/bitops.h>
8 #include <linux/string.h>
9 #include <linux/kernel.h>
10
11 /*
12 * bitmaps provide bit arrays that consume one or more unsigned
13 * longs. The bitmap interface and available operations are listed
14 * here, in bitmap.h
15 *
16 * Function implementations generic to all architectures are in
17 * lib/bitmap.c. Functions implementations that are architecture
18 * specific are in various include/asm-<arch>/bitops.h headers
19 * and other arch/<arch> specific files.
20 *
21 * See lib/bitmap.c for more details.
22 */
23
24 /*
25 * The available bitmap operations and their rough meaning in the
26 * case that the bitmap is a single unsigned long are thus:
27 *
28 * Note that nbits should be always a compile time evaluable constant.
29 * Otherwise many inlines will generate horrible code.
30 *
31 * bitmap_zero(dst, nbits) *dst = 0UL
32 * bitmap_fill(dst, nbits) *dst = ~0UL
33 * bitmap_copy(dst, src, nbits) *dst = *src
34 * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
35 * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
36 * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
37 * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
38 * bitmap_complement(dst, src, nbits) *dst = ~(*src)
39 * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
40 * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
41 * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
42 * bitmap_empty(src, nbits) Are all bits zero in *src?
43 * bitmap_full(src, nbits) Are all bits set in *src?
44 * bitmap_weight(src, nbits) Hamming Weight: number set bits
45 * bitmap_set(dst, pos, nbits) Set specified bit area
46 * bitmap_clear(dst, pos, nbits) Clear specified bit area
47 * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
48 * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
49 * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
50 * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
51 * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
52 * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
53 * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
54 * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
55 * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
56 * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
57 * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
58 * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
59 * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
60 * bitmap_release_region(bitmap, pos, order) Free specified bit region
61 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
62 * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
63 * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
64 */
65
66 /*
67 * Also the following operations in asm/bitops.h apply to bitmaps.
68 *
69 * set_bit(bit, addr) *addr |= bit
70 * clear_bit(bit, addr) *addr &= ~bit
71 * change_bit(bit, addr) *addr ^= bit
72 * test_bit(bit, addr) Is bit set in *addr?
73 * test_and_set_bit(bit, addr) Set bit and return old value
74 * test_and_clear_bit(bit, addr) Clear bit and return old value
75 * test_and_change_bit(bit, addr) Change bit and return old value
76 * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
77 * find_first_bit(addr, nbits) Position first set bit in *addr
78 * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
79 * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
80 */
81
82 /*
83 * The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used
84 * to declare an array named 'name' of just enough unsigned longs to
85 * contain all bit positions from 0 to 'bits' - 1.
86 */
87
88 /*
89 * lib/bitmap.c provides these functions:
90 */
91
92 extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits);
93 extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits);
94 extern int __bitmap_equal(const unsigned long *bitmap1,
95 const unsigned long *bitmap2, unsigned int nbits);
96 extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
97 unsigned int nbits);
98 extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
99 unsigned int shift, unsigned int nbits);
100 extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
101 unsigned int shift, unsigned int nbits);
102 extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
103 const unsigned long *bitmap2, unsigned int nbits);
104 extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
105 const unsigned long *bitmap2, unsigned int nbits);
106 extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
107 const unsigned long *bitmap2, unsigned int nbits);
108 extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
109 const unsigned long *bitmap2, unsigned int nbits);
110 extern int __bitmap_intersects(const unsigned long *bitmap1,
111 const unsigned long *bitmap2, unsigned int nbits);
112 extern int __bitmap_subset(const unsigned long *bitmap1,
113 const unsigned long *bitmap2, unsigned int nbits);
114 extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
115
116 extern void bitmap_set(unsigned long *map, unsigned int start, int len);
117 extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
118
119 extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
120 unsigned long size,
121 unsigned long start,
122 unsigned int nr,
123 unsigned long align_mask,
124 unsigned long align_offset);
125
126 /**
127 * bitmap_find_next_zero_area - find a contiguous aligned zero area
128 * @map: The address to base the search on
129 * @size: The bitmap size in bits
130 * @start: The bitnumber to start searching at
131 * @nr: The number of zeroed bits we're looking for
132 * @align_mask: Alignment mask for zero area
133 *
134 * The @align_mask should be one less than a power of 2; the effect is that
135 * the bit offset of all zero areas this function finds is multiples of that
136 * power of 2. A @align_mask of 0 means no alignment is required.
137 */
138 static inline unsigned long
139 bitmap_find_next_zero_area(unsigned long *map,
140 unsigned long size,
141 unsigned long start,
142 unsigned int nr,
143 unsigned long align_mask)
144 {
145 return bitmap_find_next_zero_area_off(map, size, start, nr,
146 align_mask, 0);
147 }
148
149 extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
150 unsigned long *dst, int nbits);
151 extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
152 unsigned long *dst, int nbits);
153 extern int bitmap_parselist(const char *buf, unsigned long *maskp,
154 int nmaskbits);
155 extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
156 unsigned long *dst, int nbits);
157 extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
158 const unsigned long *old, const unsigned long *new, unsigned int nbits);
159 extern int bitmap_bitremap(int oldbit,
160 const unsigned long *old, const unsigned long *new, int bits);
161 extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
162 const unsigned long *relmap, unsigned int bits);
163 extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
164 unsigned int sz, unsigned int nbits);
165 extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
166 extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
167 extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
168 extern unsigned int bitmap_from_u32array(unsigned long *bitmap,
169 unsigned int nbits,
170 const u32 *buf,
171 unsigned int nwords);
172 extern unsigned int bitmap_to_u32array(u32 *buf,
173 unsigned int nwords,
174 const unsigned long *bitmap,
175 unsigned int nbits);
176 #ifdef __BIG_ENDIAN
177 extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
178 #else
179 #define bitmap_copy_le bitmap_copy
180 #endif
181 extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
182 extern int bitmap_print_to_pagebuf(bool list, char *buf,
183 const unsigned long *maskp, int nmaskbits);
184
185 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
186 #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
187
188 #define small_const_nbits(nbits) \
189 (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
190
191 static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
192 {
193 if (small_const_nbits(nbits))
194 *dst = 0UL;
195 else {
196 unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
197 memset(dst, 0, len);
198 }
199 }
200
201 static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
202 {
203 unsigned int nlongs = BITS_TO_LONGS(nbits);
204 if (!small_const_nbits(nbits)) {
205 unsigned int len = (nlongs - 1) * sizeof(unsigned long);
206 memset(dst, 0xff, len);
207 }
208 dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
209 }
210
211 static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
212 unsigned int nbits)
213 {
214 if (small_const_nbits(nbits))
215 *dst = *src;
216 else {
217 unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
218 memcpy(dst, src, len);
219 }
220 }
221
222 static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
223 const unsigned long *src2, unsigned int nbits)
224 {
225 if (small_const_nbits(nbits))
226 return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
227 return __bitmap_and(dst, src1, src2, nbits);
228 }
229
230 static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
231 const unsigned long *src2, unsigned int nbits)
232 {
233 if (small_const_nbits(nbits))
234 *dst = *src1 | *src2;
235 else
236 __bitmap_or(dst, src1, src2, nbits);
237 }
238
239 static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
240 const unsigned long *src2, unsigned int nbits)
241 {
242 if (small_const_nbits(nbits))
243 *dst = *src1 ^ *src2;
244 else
245 __bitmap_xor(dst, src1, src2, nbits);
246 }
247
248 static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
249 const unsigned long *src2, unsigned int nbits)
250 {
251 if (small_const_nbits(nbits))
252 return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
253 return __bitmap_andnot(dst, src1, src2, nbits);
254 }
255
256 static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
257 unsigned int nbits)
258 {
259 if (small_const_nbits(nbits))
260 *dst = ~(*src);
261 else
262 __bitmap_complement(dst, src, nbits);
263 }
264
265 static inline int bitmap_equal(const unsigned long *src1,
266 const unsigned long *src2, unsigned int nbits)
267 {
268 if (small_const_nbits(nbits))
269 return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
270 #ifdef CONFIG_S390
271 if (__builtin_constant_p(nbits) && (nbits % BITS_PER_LONG) == 0)
272 return !memcmp(src1, src2, nbits / 8);
273 #endif
274 return __bitmap_equal(src1, src2, nbits);
275 }
276
277 static inline int bitmap_intersects(const unsigned long *src1,
278 const unsigned long *src2, unsigned int nbits)
279 {
280 if (small_const_nbits(nbits))
281 return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
282 else
283 return __bitmap_intersects(src1, src2, nbits);
284 }
285
286 static inline int bitmap_subset(const unsigned long *src1,
287 const unsigned long *src2, unsigned int nbits)
288 {
289 if (small_const_nbits(nbits))
290 return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
291 else
292 return __bitmap_subset(src1, src2, nbits);
293 }
294
295 static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
296 {
297 if (small_const_nbits(nbits))
298 return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
299
300 return find_first_bit(src, nbits) == nbits;
301 }
302
303 static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
304 {
305 if (small_const_nbits(nbits))
306 return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
307
308 return find_first_zero_bit(src, nbits) == nbits;
309 }
310
311 static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
312 {
313 if (small_const_nbits(nbits))
314 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
315 return __bitmap_weight(src, nbits);
316 }
317
318 static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
319 unsigned int shift, int nbits)
320 {
321 if (small_const_nbits(nbits))
322 *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
323 else
324 __bitmap_shift_right(dst, src, shift, nbits);
325 }
326
327 static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
328 unsigned int shift, unsigned int nbits)
329 {
330 if (small_const_nbits(nbits))
331 *dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
332 else
333 __bitmap_shift_left(dst, src, shift, nbits);
334 }
335
336 static inline int bitmap_parse(const char *buf, unsigned int buflen,
337 unsigned long *maskp, int nmaskbits)
338 {
339 return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
340 }
341
342 #endif /* __ASSEMBLY__ */
343
344 #endif /* __LINUX_BITMAP_H */ 1 #ifndef __LINUX_CPUMASK_H
2 #define __LINUX_CPUMASK_H
3
4 /*
5 * Cpumasks provide a bitmap suitable for representing the
6 * set of CPU's in a system, one bit position per CPU number. In general,
7 * only nr_cpu_ids (<= NR_CPUS) bits are valid.
8 */
9 #include <linux/kernel.h>
10 #include <linux/threads.h>
11 #include <linux/bitmap.h>
12 #include <linux/bug.h>
13
14 /* Don't assign or return these: may not be this big! */
15 typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
16
17 /**
18 * cpumask_bits - get the bits in a cpumask
19 * @maskp: the struct cpumask *
20 *
21 * You should only assume nr_cpu_ids bits of this mask are valid. This is
22 * a macro so it's const-correct.
23 */
24 #define cpumask_bits(maskp) ((maskp)->bits)
25
26 /**
27 * cpumask_pr_args - printf args to output a cpumask
28 * @maskp: cpumask to be printed
29 *
30 * Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
31 */
32 #define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
33
34 #if NR_CPUS == 1
35 #define nr_cpu_ids 1
36 #else
37 extern int nr_cpu_ids;
38 #endif
39
40 #ifdef CONFIG_CPUMASK_OFFSTACK
41 /* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
42 * not all bits may be allocated. */
43 #define nr_cpumask_bits nr_cpu_ids
44 #else
45 #define nr_cpumask_bits NR_CPUS
46 #endif
47
48 /*
49 * The following particular system cpumasks and operations manage
50 * possible, present, active and online cpus.
51 *
52 * cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
53 * cpu_present_mask - has bit 'cpu' set iff cpu is populated
54 * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler
55 * cpu_active_mask - has bit 'cpu' set iff cpu available to migration
56 *
57 * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
58 *
59 * The cpu_possible_mask is fixed at boot time, as the set of CPU id's
60 * that it is possible might ever be plugged in at anytime during the
61 * life of that system boot. The cpu_present_mask is dynamic(*),
62 * representing which CPUs are currently plugged in. And
63 * cpu_online_mask is the dynamic subset of cpu_present_mask,
64 * indicating those CPUs available for scheduling.
65 *
66 * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
67 * all NR_CPUS bits set, otherwise it is just the set of CPUs that
68 * ACPI reports present at boot.
69 *
70 * If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
71 * depending on what ACPI reports as currently plugged in, otherwise
72 * cpu_present_mask is just a copy of cpu_possible_mask.
73 *
74 * (*) Well, cpu_present_mask is dynamic in the hotplug case. If not
75 * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
76 *
77 * Subtleties:
78 * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
79 * assumption that their single CPU is online. The UP
80 * cpu_{online,possible,present}_masks are placebos. Changing them
81 * will have no useful affect on the following num_*_cpus()
82 * and cpu_*() macros in the UP case. This ugliness is a UP
83 * optimization - don't waste any instructions or memory references
84 * asking if you're online or how many CPUs there are if there is
85 * only one CPU.
86 */
87
88 extern struct cpumask __cpu_possible_mask;
89 extern struct cpumask __cpu_online_mask;
90 extern struct cpumask __cpu_present_mask;
91 extern struct cpumask __cpu_active_mask;
92 #define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
93 #define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
94 #define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
95 #define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
96
97 #if NR_CPUS > 1
98 #define num_online_cpus() cpumask_weight(cpu_online_mask)
99 #define num_possible_cpus() cpumask_weight(cpu_possible_mask)
100 #define num_present_cpus() cpumask_weight(cpu_present_mask)
101 #define num_active_cpus() cpumask_weight(cpu_active_mask)
102 #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
103 #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
104 #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
105 #define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask)
106 #else
107 #define num_online_cpus() 1U
108 #define num_possible_cpus() 1U
109 #define num_present_cpus() 1U
110 #define num_active_cpus() 1U
111 #define cpu_online(cpu) ((cpu) == 0)
112 #define cpu_possible(cpu) ((cpu) == 0)
113 #define cpu_present(cpu) ((cpu) == 0)
114 #define cpu_active(cpu) ((cpu) == 0)
115 #endif
116
117 /* verify cpu argument to cpumask_* operators */
118 static inline unsigned int cpumask_check(unsigned int cpu)
119 {
120 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
121 WARN_ON_ONCE(cpu >= nr_cpumask_bits);
122 #endif /* CONFIG_DEBUG_PER_CPU_MAPS */
123 return cpu;
124 }
125
126 #if NR_CPUS == 1
127 /* Uniprocessor. Assume all masks are "1". */
128 static inline unsigned int cpumask_first(const struct cpumask *srcp)
129 {
130 return 0;
131 }
132
133 /* Valid inputs for n are -1 and 0. */
134 static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
135 {
136 return n+1;
137 }
138
139 static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
140 {
141 return n+1;
142 }
143
144 static inline unsigned int cpumask_next_and(int n,
145 const struct cpumask *srcp,
146 const struct cpumask *andp)
147 {
148 return n+1;
149 }
150
151 /* cpu must be a valid cpu, ie 0, so there's no other choice. */
152 static inline unsigned int cpumask_any_but(const struct cpumask *mask,
153 unsigned int cpu)
154 {
155 return 1;
156 }
157
158 static inline unsigned int cpumask_local_spread(unsigned int i, int node)
159 {
160 return 0;
161 }
162
163 #define for_each_cpu(cpu, mask) \
164 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
165 #define for_each_cpu_not(cpu, mask) \
166 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
167 #define for_each_cpu_and(cpu, mask, and) \
168 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
169 #else
170 /**
171 * cpumask_first - get the first cpu in a cpumask
172 * @srcp: the cpumask pointer
173 *
174 * Returns >= nr_cpu_ids if no cpus set.
175 */
176 static inline unsigned int cpumask_first(const struct cpumask *srcp)
177 {
178 return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
179 }
180
181 /**
182 * cpumask_next - get the next cpu in a cpumask
183 * @n: the cpu prior to the place to search (ie. return will be > @n)
184 * @srcp: the cpumask pointer
185 *
186 * Returns >= nr_cpu_ids if no further cpus set.
187 */
188 static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
189 {
190 /* -1 is a legal arg here. */
191 if (n != -1)
192 cpumask_check(n);
193 return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
194 }
195
196 /**
197 * cpumask_next_zero - get the next unset cpu in a cpumask
198 * @n: the cpu prior to the place to search (ie. return will be > @n)
199 * @srcp: the cpumask pointer
200 *
201 * Returns >= nr_cpu_ids if no further cpus unset.
202 */
203 static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
204 {
205 /* -1 is a legal arg here. */
206 if (n != -1)
207 cpumask_check(n);
208 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
209 }
210
211 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
212 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
213 unsigned int cpumask_local_spread(unsigned int i, int node);
214
215 /**
216 * for_each_cpu - iterate over every cpu in a mask
217 * @cpu: the (optionally unsigned) integer iterator
218 * @mask: the cpumask pointer
219 *
220 * After the loop, cpu is >= nr_cpu_ids.
221 */
222 #define for_each_cpu(cpu, mask) \
223 for ((cpu) = -1; \
224 (cpu) = cpumask_next((cpu), (mask)), \
225 (cpu) < nr_cpu_ids;)
226
227 /**
228 * for_each_cpu_not - iterate over every cpu in a complemented mask
229 * @cpu: the (optionally unsigned) integer iterator
230 * @mask: the cpumask pointer
231 *
232 * After the loop, cpu is >= nr_cpu_ids.
233 */
234 #define for_each_cpu_not(cpu, mask) \
235 for ((cpu) = -1; \
236 (cpu) = cpumask_next_zero((cpu), (mask)), \
237 (cpu) < nr_cpu_ids;)
238
239 /**
240 * for_each_cpu_and - iterate over every cpu in both masks
241 * @cpu: the (optionally unsigned) integer iterator
242 * @mask: the first cpumask pointer
243 * @and: the second cpumask pointer
244 *
245 * This saves a temporary CPU mask in many places. It is equivalent to:
246 * struct cpumask tmp;
247 * cpumask_and(&tmp, &mask, &and);
248 * for_each_cpu(cpu, &tmp)
249 * ...
250 *
251 * After the loop, cpu is >= nr_cpu_ids.
252 */
253 #define for_each_cpu_and(cpu, mask, and) \
254 for ((cpu) = -1; \
255 (cpu) = cpumask_next_and((cpu), (mask), (and)), \
256 (cpu) < nr_cpu_ids;)
257 #endif /* SMP */
258
259 #define CPU_BITS_NONE \
260 { \
261 [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
262 }
263
264 #define CPU_BITS_CPU0 \
265 { \
266 [0] = 1UL \
267 }
268
269 /**
270 * cpumask_set_cpu - set a cpu in a cpumask
271 * @cpu: cpu number (< nr_cpu_ids)
272 * @dstp: the cpumask pointer
273 */
274 static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
275 {
276 set_bit(cpumask_check(cpu), cpumask_bits(dstp));
277 }
278
279 /**
280 * cpumask_clear_cpu - clear a cpu in a cpumask
281 * @cpu: cpu number (< nr_cpu_ids)
282 * @dstp: the cpumask pointer
283 */
284 static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
285 {
286 clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
287 }
288
289 /**
290 * cpumask_test_cpu - test for a cpu in a cpumask
291 * @cpu: cpu number (< nr_cpu_ids)
292 * @cpumask: the cpumask pointer
293 *
294 * Returns 1 if @cpu is set in @cpumask, else returns 0
295 */
296 static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
297 {
298 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
299 }
300
301 /**
302 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
303 * @cpu: cpu number (< nr_cpu_ids)
304 * @cpumask: the cpumask pointer
305 *
306 * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
307 *
308 * test_and_set_bit wrapper for cpumasks.
309 */
310 static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
311 {
312 return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
313 }
314
315 /**
316 * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask
317 * @cpu: cpu number (< nr_cpu_ids)
318 * @cpumask: the cpumask pointer
319 *
320 * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
321 *
322 * test_and_clear_bit wrapper for cpumasks.
323 */
324 static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
325 {
326 return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
327 }
328
329 /**
330 * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
331 * @dstp: the cpumask pointer
332 */
333 static inline void cpumask_setall(struct cpumask *dstp)
334 {
335 bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
336 }
337
338 /**
339 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
340 * @dstp: the cpumask pointer
341 */
342 static inline void cpumask_clear(struct cpumask *dstp)
343 {
344 bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
345 }
346
347 /**
348 * cpumask_and - *dstp = *src1p & *src2p
349 * @dstp: the cpumask result
350 * @src1p: the first input
351 * @src2p: the second input
352 *
353 * If *@dstp is empty, returns 0, else returns 1
354 */
355 static inline int cpumask_and(struct cpumask *dstp,
356 const struct cpumask *src1p,
357 const struct cpumask *src2p)
358 {
359 return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
360 cpumask_bits(src2p), nr_cpumask_bits);
361 }
362
363 /**
364 * cpumask_or - *dstp = *src1p | *src2p
365 * @dstp: the cpumask result
366 * @src1p: the first input
367 * @src2p: the second input
368 */
369 static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
370 const struct cpumask *src2p)
371 {
372 bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
373 cpumask_bits(src2p), nr_cpumask_bits);
374 }
375
376 /**
377 * cpumask_xor - *dstp = *src1p ^ *src2p
378 * @dstp: the cpumask result
379 * @src1p: the first input
380 * @src2p: the second input
381 */
382 static inline void cpumask_xor(struct cpumask *dstp,
383 const struct cpumask *src1p,
384 const struct cpumask *src2p)
385 {
386 bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
387 cpumask_bits(src2p), nr_cpumask_bits);
388 }
389
390 /**
391 * cpumask_andnot - *dstp = *src1p & ~*src2p
392 * @dstp: the cpumask result
393 * @src1p: the first input
394 * @src2p: the second input
395 *
396 * If *@dstp is empty, returns 0, else returns 1
397 */
398 static inline int cpumask_andnot(struct cpumask *dstp,
399 const struct cpumask *src1p,
400 const struct cpumask *src2p)
401 {
402 return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
403 cpumask_bits(src2p), nr_cpumask_bits);
404 }
405
406 /**
407 * cpumask_complement - *dstp = ~*srcp
408 * @dstp: the cpumask result
409 * @srcp: the input to invert
410 */
411 static inline void cpumask_complement(struct cpumask *dstp,
412 const struct cpumask *srcp)
413 {
414 bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
415 nr_cpumask_bits);
416 }
417
418 /**
419 * cpumask_equal - *src1p == *src2p
420 * @src1p: the first input
421 * @src2p: the second input
422 */
423 static inline bool cpumask_equal(const struct cpumask *src1p,
424 const struct cpumask *src2p)
425 {
426 return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
427 nr_cpumask_bits);
428 }
429
430 /**
431 * cpumask_intersects - (*src1p & *src2p) != 0
432 * @src1p: the first input
433 * @src2p: the second input
434 */
435 static inline bool cpumask_intersects(const struct cpumask *src1p,
436 const struct cpumask *src2p)
437 {
438 return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
439 nr_cpumask_bits);
440 }
441
442 /**
443 * cpumask_subset - (*src1p & ~*src2p) == 0
444 * @src1p: the first input
445 * @src2p: the second input
446 *
447 * Returns 1 if *@src1p is a subset of *@src2p, else returns 0
448 */
449 static inline int cpumask_subset(const struct cpumask *src1p,
450 const struct cpumask *src2p)
451 {
452 return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
453 nr_cpumask_bits);
454 }
455
456 /**
457 * cpumask_empty - *srcp == 0
458 * @srcp: the cpumask to that all cpus < nr_cpu_ids are clear.
459 */
460 static inline bool cpumask_empty(const struct cpumask *srcp)
461 {
462 return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
463 }
464
465 /**
466 * cpumask_full - *srcp == 0xFFFFFFFF...
467 * @srcp: the cpumask to that all cpus < nr_cpu_ids are set.
468 */
469 static inline bool cpumask_full(const struct cpumask *srcp)
470 {
471 return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
472 }
473
474 /**
475 * cpumask_weight - Count of bits in *srcp
476 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
477 */
478 static inline unsigned int cpumask_weight(const struct cpumask *srcp)
479 {
480 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
481 }
482
483 /**
484 * cpumask_shift_right - *dstp = *srcp >> n
485 * @dstp: the cpumask result
486 * @srcp: the input to shift
487 * @n: the number of bits to shift by
488 */
489 static inline void cpumask_shift_right(struct cpumask *dstp,
490 const struct cpumask *srcp, int n)
491 {
492 bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
493 nr_cpumask_bits);
494 }
495
496 /**
497 * cpumask_shift_left - *dstp = *srcp << n
498 * @dstp: the cpumask result
499 * @srcp: the input to shift
500 * @n: the number of bits to shift by
501 */
502 static inline void cpumask_shift_left(struct cpumask *dstp,
503 const struct cpumask *srcp, int n)
504 {
505 bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
506 nr_cpumask_bits);
507 }
508
509 /**
510 * cpumask_copy - *dstp = *srcp
511 * @dstp: the result
512 * @srcp: the input cpumask
513 */
514 static inline void cpumask_copy(struct cpumask *dstp,
515 const struct cpumask *srcp)
516 {
517 bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
518 }
519
520 /**
521 * cpumask_any - pick a "random" cpu from *srcp
522 * @srcp: the input cpumask
523 *
524 * Returns >= nr_cpu_ids if no cpus set.
525 */
526 #define cpumask_any(srcp) cpumask_first(srcp)
527
528 /**
529 * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
530 * @src1p: the first input
531 * @src2p: the second input
532 *
533 * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
534 */
535 #define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
536
537 /**
538 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
539 * @mask1: the first input cpumask
540 * @mask2: the second input cpumask
541 *
542 * Returns >= nr_cpu_ids if no cpus set.
543 */
544 #define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
545
546 /**
547 * cpumask_of - the cpumask containing just a given cpu
548 * @cpu: the cpu (<= nr_cpu_ids)
549 */
550 #define cpumask_of(cpu) (get_cpu_mask(cpu))
551
552 /**
553 * cpumask_parse_user - extract a cpumask from a user string
554 * @buf: the buffer to extract from
555 * @len: the length of the buffer
556 * @dstp: the cpumask to set.
557 *
558 * Returns -errno, or 0 for success.
559 */
560 static inline int cpumask_parse_user(const char __user *buf, int len,
561 struct cpumask *dstp)
562 {
563 return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
564 }
565
566 /**
567 * cpumask_parselist_user - extract a cpumask from a user string
568 * @buf: the buffer to extract from
569 * @len: the length of the buffer
570 * @dstp: the cpumask to set.
571 *
572 * Returns -errno, or 0 for success.
573 */
574 static inline int cpumask_parselist_user(const char __user *buf, int len,
575 struct cpumask *dstp)
576 {
577 return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
578 nr_cpu_ids);
579 }
580
581 /**
582 * cpumask_parse - extract a cpumask from a string
583 * @buf: the buffer to extract from
584 * @dstp: the cpumask to set.
585 *
586 * Returns -errno, or 0 for success.
587 */
588 static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
589 {
590 char *nl = strchr(buf, '\n');
591 unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
592
593 return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
594 }
595
596 /**
597 * cpulist_parse - extract a cpumask from a user string of ranges
598 * @buf: the buffer to extract from
599 * @dstp: the cpumask to set.
600 *
601 * Returns -errno, or 0 for success.
602 */
603 static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
604 {
605 return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
606 }
607
608 /**
609 * cpumask_size - size to allocate for a 'struct cpumask' in bytes
610 */
611 static inline size_t cpumask_size(void)
612 {
613 return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
614 }
615
616 /*
617 * cpumask_var_t: struct cpumask for stack usage.
618 *
619 * Oh, the wicked games we play! In order to make kernel coding a
620 * little more difficult, we typedef cpumask_var_t to an array or a
621 * pointer: doing &mask on an array is a noop, so it still works.
622 *
623 * ie.
624 * cpumask_var_t tmpmask;
625 * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
626 * return -ENOMEM;
627 *
628 * ... use 'tmpmask' like a normal struct cpumask * ...
629 *
630 * free_cpumask_var(tmpmask);
631 *
632 *
633 * However, one notable exception is there. alloc_cpumask_var() allocates
634 * only nr_cpumask_bits bits (in the other hand, real cpumask_t always has
635 * NR_CPUS bits). Therefore you don't have to dereference cpumask_var_t.
636 *
637 * cpumask_var_t tmpmask;
638 * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
639 * return -ENOMEM;
640 *
641 * var = *tmpmask;
642 *
643 * This code makes NR_CPUS length memcopy and brings to a memory corruption.
644 * cpumask_copy() provide safe copy functionality.
645 *
646 * Note that there is another evil here: If you define a cpumask_var_t
647 * as a percpu variable then the way to obtain the address of the cpumask
648 * structure differently influences what this_cpu_* operation needs to be
649 * used. Please use this_cpu_cpumask_var_t in those cases. The direct use
650 * of this_cpu_ptr() or this_cpu_read() will lead to failures when the
651 * other type of cpumask_var_t implementation is configured.
652 */
653 #ifdef CONFIG_CPUMASK_OFFSTACK
654 typedef struct cpumask *cpumask_var_t;
655
656 #define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
657
658 bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
659 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
660 bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
661 bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
662 void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
663 void free_cpumask_var(cpumask_var_t mask);
664 void free_bootmem_cpumask_var(cpumask_var_t mask);
665
666 #else
667 typedef struct cpumask cpumask_var_t[1];
668
669 #define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
670
671 static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
672 {
673 return true;
674 }
675
676 static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
677 int node)
678 {
679 return true;
680 }
681
682 static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
683 {
684 cpumask_clear(*mask);
685 return true;
686 }
687
688 static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
689 int node)
690 {
691 cpumask_clear(*mask);
692 return true;
693 }
694
695 static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
696 {
697 }
698
699 static inline void free_cpumask_var(cpumask_var_t mask)
700 {
701 }
702
703 static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
704 {
705 }
706 #endif /* CONFIG_CPUMASK_OFFSTACK */
707
708 /* It's common to want to use cpu_all_mask in struct member initializers,
709 * so it has to refer to an address rather than a pointer. */
710 extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
711 #define cpu_all_mask to_cpumask(cpu_all_bits)
712
713 /* First bits of cpu_bit_bitmap are in fact unset. */
714 #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
715
716 #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
717 #define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
718 #define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
719
720 /* Wrappers for arch boot code to manipulate normally-constant masks */
721 void init_cpu_present(const struct cpumask *src);
722 void init_cpu_possible(const struct cpumask *src);
723 void init_cpu_online(const struct cpumask *src);
724
725 static inline void
726 set_cpu_possible(unsigned int cpu, bool possible)
727 {
728 if (possible)
729 cpumask_set_cpu(cpu, &__cpu_possible_mask);
730 else
731 cpumask_clear_cpu(cpu, &__cpu_possible_mask);
732 }
733
734 static inline void
735 set_cpu_present(unsigned int cpu, bool present)
736 {
737 if (present)
738 cpumask_set_cpu(cpu, &__cpu_present_mask);
739 else
740 cpumask_clear_cpu(cpu, &__cpu_present_mask);
741 }
742
743 static inline void
744 set_cpu_online(unsigned int cpu, bool online)
745 {
746 if (online)
747 cpumask_set_cpu(cpu, &__cpu_online_mask);
748 else
749 cpumask_clear_cpu(cpu, &__cpu_online_mask);
750 }
751
752 static inline void
753 set_cpu_active(unsigned int cpu, bool active)
754 {
755 if (active)
756 cpumask_set_cpu(cpu, &__cpu_active_mask);
757 else
758 cpumask_clear_cpu(cpu, &__cpu_active_mask);
759 }
760
761
762 /**
763 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
764 * @bitmap: the bitmap
765 *
766 * There are a few places where cpumask_var_t isn't appropriate and
767 * static cpumasks must be used (eg. very early boot), yet we don't
768 * expose the definition of 'struct cpumask'.
769 *
770 * This does the conversion, and can be used as a constant initializer.
771 */
/* The conditional expression type-checks @bitmap against
 * __check_is_bitmap() at compile time while still being usable as a
 * constant initializer; branch 1 is always taken. */
#define to_cpumask(bitmap)						\
	((struct cpumask *)(1 ? (bitmap)				\
			    : (void *)sizeof(__check_is_bitmap(bitmap))))

/* Exists solely so to_cpumask() rejects non-bitmap arguments. */
static inline int __check_is_bitmap(const unsigned long *bitmap)
{
	return 1;
}
780
781 /*
782 * Special-case data structure for "single bit set only" constant CPU masks.
783 *
784 * We pre-generate all the 64 (or 32) possible bit positions, with enough
785 * padding to the left and the right, and return the constant pointer
786 * appropriately offset.
787 */
788 extern const unsigned long
789 cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
790
791 static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
792 {
793 const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
794 p -= cpu / BITS_PER_LONG;
795 return to_cpumask(p);
796 }
797
798 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
799
800 #if NR_CPUS <= BITS_PER_LONG
801 #define CPU_BITS_ALL \
802 { \
803 [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
804 }
805
806 #else /* NR_CPUS > BITS_PER_LONG */
807
808 #define CPU_BITS_ALL \
809 { \
810 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
811 [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
812 }
813 #endif /* NR_CPUS > BITS_PER_LONG */
814
815 /**
816 * cpumap_print_to_pagebuf - copies the cpumask into the buffer either
817 * as comma-separated list of cpus or hex values of cpumask
818 * @list: indicates whether the cpumap must be list
819 * @mask: the cpumask to copy
820 * @buf: the buffer to copy into
821 *
822 * Returns the length of the (null-terminated) @buf string, zero if
823 * nothing is copied.
824 */
825 static inline ssize_t
826 cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
827 {
828 return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
829 nr_cpu_ids);
830 }
831
832 #if NR_CPUS <= BITS_PER_LONG
833 #define CPU_MASK_ALL \
834 (cpumask_t) { { \
835 [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
836 } }
837 #else
838 #define CPU_MASK_ALL \
839 (cpumask_t) { { \
840 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
841 [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
842 } }
843 #endif /* NR_CPUS > BITS_PER_LONG */
844
845 #define CPU_MASK_NONE \
846 (cpumask_t) { { \
847 [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
848 } }
849
850 #define CPU_MASK_CPU0 \
851 (cpumask_t) { { \
852 [0] = 1UL \
853 } }
854
855 #endif /* __LINUX_CPUMASK_H */ 1 #ifndef _LINUX_DMA_MAPPING_H
2 #define _LINUX_DMA_MAPPING_H
3
4 #include <linux/sizes.h>
5 #include <linux/string.h>
6 #include <linux/device.h>
7 #include <linux/err.h>
8 #include <linux/dma-debug.h>
9 #include <linux/dma-direction.h>
10 #include <linux/scatterlist.h>
11 #include <linux/kmemcheck.h>
12 #include <linux/bug.h>
13
14 /**
15 * List of possible attributes associated with a DMA mapping. The semantics
16 * of each attribute should be defined in Documentation/DMA-attributes.txt.
17 *
18 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
19 * forces all pending DMA writes to complete.
20 */
21 #define DMA_ATTR_WRITE_BARRIER (1UL << 0)
22 /*
23 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
24 * may be weakly ordered, that is that reads and writes may pass each other.
25 */
26 #define DMA_ATTR_WEAK_ORDERING (1UL << 1)
27 /*
28 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
29 * buffered to improve performance.
30 */
31 #define DMA_ATTR_WRITE_COMBINE (1UL << 2)
32 /*
33 * DMA_ATTR_NON_CONSISTENT: Lets the platform to choose to return either
34 * consistent or non-consistent memory as it sees fit.
35 */
36 #define DMA_ATTR_NON_CONSISTENT (1UL << 3)
37 /*
38 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform to avoid creating a kernel
39 * virtual mapping for the allocated buffer.
40 */
41 #define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
42 /*
43 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
44 * the CPU cache for the given buffer assuming that it has been already
45 * transferred to 'device' domain.
46 */
47 #define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
48 /*
49 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
50 * in physical memory.
51 */
52 #define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
53 /*
54 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
55 * that it's probably not worth the time to try to allocate memory to in a way
56 * that gives better TLB efficiency.
57 */
58 #define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
59
60 /*
61 * A dma_addr_t can hold any valid DMA or bus address for the platform.
62 * It can be given to a device to use as a DMA source or target. A CPU cannot
63 * reference a dma_addr_t directly because there may be translation between
64 * its physical address space and the bus address space.
65 */
66 struct dma_map_ops {
67 void* (*alloc)(struct device *dev, size_t size,
68 dma_addr_t *dma_handle, gfp_t gfp,
69 unsigned long attrs);
70 void (*free)(struct device *dev, size_t size,
71 void *vaddr, dma_addr_t dma_handle,
72 unsigned long attrs);
73 int (*mmap)(struct device *, struct vm_area_struct *,
74 void *, dma_addr_t, size_t,
75 unsigned long attrs);
76
77 int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
78 dma_addr_t, size_t, unsigned long attrs);
79
80 dma_addr_t (*map_page)(struct device *dev, struct page *page,
81 unsigned long offset, size_t size,
82 enum dma_data_direction dir,
83 unsigned long attrs);
84 void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
85 size_t size, enum dma_data_direction dir,
86 unsigned long attrs);
87 /*
88 * map_sg returns 0 on error and a value > 0 on success.
89 * It should never return a value < 0.
90 */
91 int (*map_sg)(struct device *dev, struct scatterlist *sg,
92 int nents, enum dma_data_direction dir,
93 unsigned long attrs);
94 void (*unmap_sg)(struct device *dev,
95 struct scatterlist *sg, int nents,
96 enum dma_data_direction dir,
97 unsigned long attrs);
98 void (*sync_single_for_cpu)(struct device *dev,
99 dma_addr_t dma_handle, size_t size,
100 enum dma_data_direction dir);
101 void (*sync_single_for_device)(struct device *dev,
102 dma_addr_t dma_handle, size_t size,
103 enum dma_data_direction dir);
104 void (*sync_sg_for_cpu)(struct device *dev,
105 struct scatterlist *sg, int nents,
106 enum dma_data_direction dir);
107 void (*sync_sg_for_device)(struct device *dev,
108 struct scatterlist *sg, int nents,
109 enum dma_data_direction dir);
110 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
111 int (*dma_supported)(struct device *dev, u64 mask);
112 int (*set_dma_mask)(struct device *dev, u64 mask);
113 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
114 u64 (*get_required_mask)(struct device *dev);
115 #endif
116 int is_phys;
117 };
118
119 extern struct dma_map_ops dma_noop_ops;
120
121 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
122
123 #define DMA_MASK_NONE 0x0ULL
124
125 static inline int valid_dma_direction(int dma_direction)
126 {
127 return ((dma_direction == DMA_BIDIRECTIONAL) ||
128 (dma_direction == DMA_TO_DEVICE) ||
129 (dma_direction == DMA_FROM_DEVICE));
130 }
131
132 static inline int is_device_dma_capable(struct device *dev)
133 {
134 return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
135 }
136
137 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
138 /*
139 * These three functions are only for dma allocator.
140 * Don't use them in device drivers.
141 */
142 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
143 dma_addr_t *dma_handle, void **ret);
144 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
145
146 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
147 void *cpu_addr, size_t size, int *ret);
148 #else
149 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
150 #define dma_release_from_coherent(dev, order, vaddr) (0)
151 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
152 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
153
154 #ifdef CONFIG_HAS_DMA
155 #include <asm/dma-mapping.h>
156 #else
157 /*
158 * Define the dma api to allow compilation but not linking of
159 * dma dependent code. Code that depends on the dma-mapping
160 * API needs to set 'depends on HAS_DMA' in its Kconfig
161 */
162 extern struct dma_map_ops bad_dma_ops;
163 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
164 {
165 return &bad_dma_ops;
166 }
167 #endif
168
169 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
170 size_t size,
171 enum dma_data_direction dir,
172 unsigned long attrs)
173 {
174 struct dma_map_ops *ops = get_dma_ops(dev);
175 dma_addr_t addr;
176
177 kmemcheck_mark_initialized(ptr, size);
178 BUG_ON(!valid_dma_direction(dir));
179 addr = ops->map_page(dev, virt_to_page(ptr),
180 offset_in_page(ptr), size,
181 dir, attrs);
182 debug_dma_map_page(dev, virt_to_page(ptr),
183 offset_in_page(ptr), size,
184 dir, addr, true);
185 return addr;
186 }
187
188 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
189 size_t size,
190 enum dma_data_direction dir,
191 unsigned long attrs)
192 {
193 struct dma_map_ops *ops = get_dma_ops(dev);
194
195 BUG_ON(!valid_dma_direction(dir));
196 if (ops->unmap_page)
197 ops->unmap_page(dev, addr, size, dir, attrs);
198 debug_dma_unmap_page(dev, addr, size, dir, true);
199 }
200
201 /*
202 * dma_maps_sg_attrs returns 0 on error and > 0 on success.
203 * It should never return a value < 0.
204 */
205 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
206 int nents, enum dma_data_direction dir,
207 unsigned long attrs)
208 {
209 struct dma_map_ops *ops = get_dma_ops(dev);
210 int i, ents;
211 struct scatterlist *s;
212
213 for_each_sg(sg, s, nents, i)
214 kmemcheck_mark_initialized(sg_virt(s), s->length);
215 BUG_ON(!valid_dma_direction(dir));
216 ents = ops->map_sg(dev, sg, nents, dir, attrs);
217 BUG_ON(ents < 0);
218 debug_dma_map_sg(dev, sg, nents, ents, dir);
219
220 return ents;
221 }
222
223 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
224 int nents, enum dma_data_direction dir,
225 unsigned long attrs)
226 {
227 struct dma_map_ops *ops = get_dma_ops(dev);
228
229 BUG_ON(!valid_dma_direction(dir));
230 debug_dma_unmap_sg(dev, sg, nents, dir);
231 if (ops->unmap_sg)
232 ops->unmap_sg(dev, sg, nents, dir, attrs);
233 }
234
235 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
236 size_t offset, size_t size,
237 enum dma_data_direction dir)
238 {
239 struct dma_map_ops *ops = get_dma_ops(dev);
240 dma_addr_t addr;
241
242 kmemcheck_mark_initialized(page_address(page) + offset, size);
243 BUG_ON(!valid_dma_direction(dir));
244 addr = ops->map_page(dev, page, offset, size, dir, 0);
245 debug_dma_map_page(dev, page, offset, size, dir, addr, false);
246
247 return addr;
248 }
249
250 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
251 size_t size, enum dma_data_direction dir)
252 {
253 struct dma_map_ops *ops = get_dma_ops(dev);
254
255 BUG_ON(!valid_dma_direction(dir));
256 if (ops->unmap_page)
257 ops->unmap_page(dev, addr, size, dir, 0);
258 debug_dma_unmap_page(dev, addr, size, dir, false);
259 }
260
261 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
262 size_t size,
263 enum dma_data_direction dir)
264 {
265 struct dma_map_ops *ops = get_dma_ops(dev);
266
267 BUG_ON(!valid_dma_direction(dir));
268 if (ops->sync_single_for_cpu)
269 ops->sync_single_for_cpu(dev, addr, size, dir);
270 debug_dma_sync_single_for_cpu(dev, addr, size, dir);
271 }
272
273 static inline void dma_sync_single_for_device(struct device *dev,
274 dma_addr_t addr, size_t size,
275 enum dma_data_direction dir)
276 {
277 struct dma_map_ops *ops = get_dma_ops(dev);
278
279 BUG_ON(!valid_dma_direction(dir));
280 if (ops->sync_single_for_device)
281 ops->sync_single_for_device(dev, addr, size, dir);
282 debug_dma_sync_single_for_device(dev, addr, size, dir);
283 }
284
285 static inline void dma_sync_single_range_for_cpu(struct device *dev,
286 dma_addr_t addr,
287 unsigned long offset,
288 size_t size,
289 enum dma_data_direction dir)
290 {
291 const struct dma_map_ops *ops = get_dma_ops(dev);
292
293 BUG_ON(!valid_dma_direction(dir));
294 if (ops->sync_single_for_cpu)
295 ops->sync_single_for_cpu(dev, addr + offset, size, dir);
296 debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
297 }
298
299 static inline void dma_sync_single_range_for_device(struct device *dev,
300 dma_addr_t addr,
301 unsigned long offset,
302 size_t size,
303 enum dma_data_direction dir)
304 {
305 const struct dma_map_ops *ops = get_dma_ops(dev);
306
307 BUG_ON(!valid_dma_direction(dir));
308 if (ops->sync_single_for_device)
309 ops->sync_single_for_device(dev, addr + offset, size, dir);
310 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
311 }
312
313 static inline void
314 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
315 int nelems, enum dma_data_direction dir)
316 {
317 struct dma_map_ops *ops = get_dma_ops(dev);
318
319 BUG_ON(!valid_dma_direction(dir));
320 if (ops->sync_sg_for_cpu)
321 ops->sync_sg_for_cpu(dev, sg, nelems, dir);
322 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
323 }
324
325 static inline void
326 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
327 int nelems, enum dma_data_direction dir)
328 {
329 struct dma_map_ops *ops = get_dma_ops(dev);
330
331 BUG_ON(!valid_dma_direction(dir));
332 if (ops->sync_sg_for_device)
333 ops->sync_sg_for_device(dev, sg, nelems, dir);
334 debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
335
336 }
337
338 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
339 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
340 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
341 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
342
343 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
344 void *cpu_addr, dma_addr_t dma_addr, size_t size);
345
346 void *dma_common_contiguous_remap(struct page *page, size_t size,
347 unsigned long vm_flags,
348 pgprot_t prot, const void *caller);
349
350 void *dma_common_pages_remap(struct page **pages, size_t size,
351 unsigned long vm_flags, pgprot_t prot,
352 const void *caller);
353 void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
354
355 /**
356 * dma_mmap_attrs - map a coherent DMA allocation into user space
357 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
358 * @vma: vm_area_struct describing requested user mapping
359 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
360 * @handle: device-view address returned from dma_alloc_attrs
361 * @size: size of memory originally requested in dma_alloc_attrs
362 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
363 *
364 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
365 * into user space. The coherent DMA buffer must not be freed by the
366 * driver until the user space mapping has been released.
367 */
368 static inline int
369 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
370 dma_addr_t dma_addr, size_t size, unsigned long attrs)
371 {
372 struct dma_map_ops *ops = get_dma_ops(dev);
373 BUG_ON(!ops);
374 if (ops->mmap)
375 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
376 return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
377 }
378
379 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
380
381 int
382 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
383 void *cpu_addr, dma_addr_t dma_addr, size_t size);
384
385 static inline int
386 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
387 dma_addr_t dma_addr, size_t size,
388 unsigned long attrs)
389 {
390 struct dma_map_ops *ops = get_dma_ops(dev);
391 BUG_ON(!ops);
392 if (ops->get_sgtable)
393 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
394 attrs);
395 return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
396 }
397
398 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
399
400 #ifndef arch_dma_alloc_attrs
401 #define arch_dma_alloc_attrs(dev, flag) (true)
402 #endif
403
404 static inline void *dma_alloc_attrs(struct device *dev, size_t size,
405 dma_addr_t *dma_handle, gfp_t flag,
406 unsigned long attrs)
407 {
408 struct dma_map_ops *ops = get_dma_ops(dev);
409 void *cpu_addr;
410
411 BUG_ON(!ops);
412
413 if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
414 return cpu_addr;
415
416 if (!arch_dma_alloc_attrs(&dev, &flag))
417 return NULL;
418 if (!ops->alloc)
419 return NULL;
420
421 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
422 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
423 return cpu_addr;
424 }
425
426 static inline void dma_free_attrs(struct device *dev, size_t size,
427 void *cpu_addr, dma_addr_t dma_handle,
428 unsigned long attrs)
429 {
430 struct dma_map_ops *ops = get_dma_ops(dev);
431
432 BUG_ON(!ops);
433 WARN_ON(irqs_disabled());
434
435 if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
436 return;
437
438 if (!ops->free || !cpu_addr)
439 return;
440
441 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
442 ops->free(dev, size, cpu_addr, dma_handle, attrs);
443 }
444
445 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
446 dma_addr_t *dma_handle, gfp_t flag)
447 {
448 return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
449 }
450
451 static inline void dma_free_coherent(struct device *dev, size_t size,
452 void *cpu_addr, dma_addr_t dma_handle)
453 {
454 return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
455 }
456
457 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
458 dma_addr_t *dma_handle, gfp_t gfp)
459 {
460 return dma_alloc_attrs(dev, size, dma_handle, gfp,
461 DMA_ATTR_NON_CONSISTENT);
462 }
463
464 static inline void dma_free_noncoherent(struct device *dev, size_t size,
465 void *cpu_addr, dma_addr_t dma_handle)
466 {
467 dma_free_attrs(dev, size, cpu_addr, dma_handle,
468 DMA_ATTR_NON_CONSISTENT);
469 }
470
471 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
472 {
473 debug_dma_mapping_error(dev, dma_addr);
474
475 if (get_dma_ops(dev)->mapping_error)
476 return get_dma_ops(dev)->mapping_error(dev, dma_addr);
477
478 #ifdef DMA_ERROR_CODE
479 return dma_addr == DMA_ERROR_CODE;
480 #else
481 return 0;
482 #endif
483 }
484
485 #ifndef HAVE_ARCH_DMA_SUPPORTED
486 static inline int dma_supported(struct device *dev, u64 mask)
487 {
488 struct dma_map_ops *ops = get_dma_ops(dev);
489
490 if (!ops)
491 return 0;
492 if (!ops->dma_supported)
493 return 1;
494 return ops->dma_supported(dev, mask);
495 }
496 #endif
497
498 #ifndef HAVE_ARCH_DMA_SET_MASK
499 static inline int dma_set_mask(struct device *dev, u64 mask)
500 {
501 struct dma_map_ops *ops = get_dma_ops(dev);
502
503 if (ops->set_dma_mask)
504 return ops->set_dma_mask(dev, mask);
505
506 if (!dev->dma_mask || !dma_supported(dev, mask))
507 return -EIO;
508 *dev->dma_mask = mask;
509 return 0;
510 }
511 #endif
512
513 static inline u64 dma_get_mask(struct device *dev)
514 {
515 if (dev && dev->dma_mask && *dev->dma_mask)
516 return *dev->dma_mask;
517 return DMA_BIT_MASK(32);
518 }
519
520 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
521 int dma_set_coherent_mask(struct device *dev, u64 mask);
522 #else
523 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
524 {
525 if (!dma_supported(dev, mask))
526 return -EIO;
527 dev->coherent_dma_mask = mask;
528 return 0;
529 }
530 #endif
531
532 /*
533 * Set both the DMA mask and the coherent DMA mask to the same thing.
534 * Note that we don't check the return value from dma_set_coherent_mask()
535 * as the DMA API guarantees that the coherent DMA mask can be set to
536 * the same or smaller than the streaming DMA mask.
537 */
538 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
539 {
540 int rc = dma_set_mask(dev, mask);
541 if (rc == 0)
542 dma_set_coherent_mask(dev, mask);
543 return rc;
544 }
545
546 /*
547 * Similar to the above, except it deals with the case where the device
548 * does not have dev->dma_mask appropriately setup.
549 */
550 static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
551 {
552 dev->dma_mask = &dev->coherent_dma_mask;
553 return dma_set_mask_and_coherent(dev, mask);
554 }
555
556 extern u64 dma_get_required_mask(struct device *dev);
557
558 #ifndef arch_setup_dma_ops
559 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
560 u64 size, const struct iommu_ops *iommu,
561 bool coherent) { }
562 #endif
563
564 #ifndef arch_teardown_dma_ops
565 static inline void arch_teardown_dma_ops(struct device *dev) { }
566 #endif
567
568 static inline unsigned int dma_get_max_seg_size(struct device *dev)
569 {
570 if (dev->dma_parms && dev->dma_parms->max_segment_size)
571 return dev->dma_parms->max_segment_size;
572 return SZ_64K;
573 }
574
575 static inline unsigned int dma_set_max_seg_size(struct device *dev,
576 unsigned int size)
577 {
578 if (dev->dma_parms) {
579 dev->dma_parms->max_segment_size = size;
580 return 0;
581 }
582 return -EIO;
583 }
584
585 static inline unsigned long dma_get_seg_boundary(struct device *dev)
586 {
587 if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
588 return dev->dma_parms->segment_boundary_mask;
589 return DMA_BIT_MASK(32);
590 }
591
592 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
593 {
594 if (dev->dma_parms) {
595 dev->dma_parms->segment_boundary_mask = mask;
596 return 0;
597 }
598 return -EIO;
599 }
600
601 #ifndef dma_max_pfn
602 static inline unsigned long dma_max_pfn(struct device *dev)
603 {
604 return *dev->dma_mask >> PAGE_SHIFT;
605 }
606 #endif
607
608 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
609 dma_addr_t *dma_handle, gfp_t flag)
610 {
611 void *ret = dma_alloc_coherent(dev, size, dma_handle,
612 flag | __GFP_ZERO);
613 return ret;
614 }
615
#ifdef CONFIG_HAS_DMA
/* Minimum alignment for DMA-safe buffers: the architecture's
 * ARCH_DMA_MINALIGN when defined, otherwise no constraint (1). */
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#else
	return 1;
#endif
}
#endif
625
626 /* flags for the coherent memory api */
627 #define DMA_MEMORY_MAP 0x01
628 #define DMA_MEMORY_IO 0x02
629 #define DMA_MEMORY_INCLUDES_CHILDREN 0x04
630 #define DMA_MEMORY_EXCLUSIVE 0x08
631
632 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
633 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
634 dma_addr_t device_addr, size_t size, int flags);
635 void dma_release_declared_memory(struct device *dev);
636 void *dma_mark_declared_memory_occupied(struct device *dev,
637 dma_addr_t device_addr, size_t size);
638 #else
639 static inline int
640 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
641 dma_addr_t device_addr, size_t size, int flags)
642 {
643 return 0;
644 }
645
646 static inline void
647 dma_release_declared_memory(struct device *dev)
648 {
649 }
650
651 static inline void *
652 dma_mark_declared_memory_occupied(struct device *dev,
653 dma_addr_t device_addr, size_t size)
654 {
655 return ERR_PTR(-EBUSY);
656 }
657 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
658
659 /*
660 * Managed DMA API
661 */
662 extern void *dmam_alloc_coherent(struct device *dev, size_t size,
663 dma_addr_t *dma_handle, gfp_t gfp);
664 extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
665 dma_addr_t dma_handle);
666 extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
667 dma_addr_t *dma_handle, gfp_t gfp);
668 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
669 dma_addr_t dma_handle);
670 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
671 extern int dmam_declare_coherent_memory(struct device *dev,
672 phys_addr_t phys_addr,
673 dma_addr_t device_addr, size_t size,
674 int flags);
675 extern void dmam_release_declared_memory(struct device *dev);
676 #else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
677 static inline int dmam_declare_coherent_memory(struct device *dev,
678 phys_addr_t phys_addr, dma_addr_t device_addr,
679 size_t size, gfp_t gfp)
680 {
681 return 0;
682 }
683
/*
 * Stub for !CONFIG_HAVE_GENERIC_DMA_COHERENT builds: no-op, since the
 * corresponding dmam_declare_coherent_memory() stub declared nothing.
 */
static inline void dmam_release_declared_memory(struct device *dev)
{
}
687 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
688
/*
 * Allocate a coherent DMA buffer mapped with write-combining attributes.
 * Thin wrapper that forwards to dma_alloc_attrs() with
 * DMA_ATTR_WRITE_COMBINE; returns the CPU virtual address (or NULL on
 * failure) and stores the bus address in *dma_addr.
 */
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
695 #ifndef dma_alloc_writecombine
696 #define dma_alloc_writecombine dma_alloc_wc
697 #endif
698
699 static inline void dma_free_wc(struct device *dev, size_t size,
700 void *cpu_addr, dma_addr_t dma_addr)
701 {
702 return dma_free_attrs(dev, size, cpu_addr, dma_addr,
703 DMA_ATTR_WRITE_COMBINE);
704 }
705 #ifndef dma_free_writecombine
706 #define dma_free_writecombine dma_free_wc
707 #endif
708
/*
 * Map a dma_alloc_wc() buffer into a user-space VMA with write-combining
 * attributes.  Thin wrapper around dma_mmap_attrs(); returns whatever
 * dma_mmap_attrs() returns (0 on success, negative errno on failure).
 */
static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
717 #ifndef dma_mmap_writecombine
718 #define dma_mmap_writecombine dma_mmap_wc
719 #endif
720
721 #ifdef CONFIG_NEED_DMA_MAP_STATE
722 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
723 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
724 #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
725 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
726 #define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
727 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
728 #else
729 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
730 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
731 #define dma_unmap_addr(PTR, ADDR_NAME) (0)
732 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
733 #define dma_unmap_len(PTR, LEN_NAME) (0)
734 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
735 #endif
736
737 #endif 1 #ifndef LINUX_KMEMCHECK_H
2 #define LINUX_KMEMCHECK_H
3
4 #include <linux/mm_types.h>
5 #include <linux/types.h>
6
7 #ifdef CONFIG_KMEMCHECK
8 extern int kmemcheck_enabled;
9
10 /* The slab-related functions. */
11 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
12 void kmemcheck_free_shadow(struct page *page, int order);
13 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
14 size_t size);
15 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
16
17 void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
18 gfp_t gfpflags);
19
20 void kmemcheck_show_pages(struct page *p, unsigned int n);
21 void kmemcheck_hide_pages(struct page *p, unsigned int n);
22
23 bool kmemcheck_page_is_tracked(struct page *p);
24
25 void kmemcheck_mark_unallocated(void *address, unsigned int n);
26 void kmemcheck_mark_uninitialized(void *address, unsigned int n);
27 void kmemcheck_mark_initialized(void *address, unsigned int n);
28 void kmemcheck_mark_freed(void *address, unsigned int n);
29
30 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
31 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
32 void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
33
34 int kmemcheck_show_addr(unsigned long address);
35 int kmemcheck_hide_addr(unsigned long address);
36
37 bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
38
39 /*
40 * Bitfield annotations
41 *
42 * How to use: If you have a struct using bitfields, for example
43 *
44 * struct a {
45 * int x:8, y:8;
46 * };
47 *
48 * then this should be rewritten as
49 *
50 * struct a {
51 * kmemcheck_bitfield_begin(flags);
52 * int x:8, y:8;
53 * kmemcheck_bitfield_end(flags);
54 * };
55 *
56 * Now the "flags_begin" and "flags_end" members may be used to refer to the
57 * beginning and end, respectively, of the bitfield (and things like
58 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
59 * fields should be annotated:
60 *
61 * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
62 * kmemcheck_annotate_bitfield(a, flags);
63 */
64 #define kmemcheck_bitfield_begin(name) \
65 int name##_begin[0];
66
67 #define kmemcheck_bitfield_end(name) \
68 int name##_end[0];
69
70 #define kmemcheck_annotate_bitfield(ptr, name) \
71 do { \
72 int _n; \
73 \
74 if (!ptr) \
75 break; \
76 \
77 _n = (long) &((ptr)->name##_end) \
78 - (long) &((ptr)->name##_begin); \
79 BUILD_BUG_ON(_n < 0); \
80 \
81 kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
82 } while (0)
83
/*
 * Mark an entire local/stack variable as initialized for kmemcheck.
 *
 * Fix: drop the stray trailing line-continuation backslash that followed
 * "} while (0)" — it silently spliced the next source line into this
 * macro definition.
 */
#define kmemcheck_annotate_variable(var)				\
	do {								\
		kmemcheck_mark_initialized(&(var), sizeof(var));	\
	} while (0)
88
89 #else
90 #define kmemcheck_enabled 0
91
/*
 * CONFIG_KMEMCHECK disabled: every kmemcheck hook below compiles to an
 * empty static inline (or a constant-returning one) so call sites need no
 * #ifdefs and the compiler can eliminate the calls entirely.
 */
static inline void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
		     size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				       size_t size)
{
}

static inline void kmemcheck_pagealloc_alloc(struct page *p,
					     unsigned int order, gfp_t gfpflags)
{
}

/* No pages are ever tracked when kmemcheck is compiled out. */
static inline bool kmemcheck_page_is_tracked(struct page *p)
{
	return false;
}

static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_unallocated_pages(struct page *p,
						    unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
						      unsigned int n)
{
}

static inline void kmemcheck_mark_initialized_pages(struct page *p,
						    unsigned int n)
{
}

/* Without kmemcheck, every object is assumed fully initialized. */
static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	return true;
}
158
159 #define kmemcheck_bitfield_begin(name)
160 #define kmemcheck_bitfield_end(name)
161 #define kmemcheck_annotate_bitfield(ptr, name) \
162 do { \
163 } while (0)
164
165 #define kmemcheck_annotate_variable(var) \
166 do { \
167 } while (0)
168
169 #endif /* CONFIG_KMEMCHECK */
170
171 #endif /* LINUX_KMEMCHECK_H */ 1 #ifndef _LINUX_UNALIGNED_ACCESS_OK_H
2 #define _LINUX_UNALIGNED_ACCESS_OK_H
3
4 #include <linux/kernel.h>
5 #include <asm/byteorder.h>
6
/*
 * Unaligned little-/big-endian accessors for architectures whose hardware
 * tolerates unaligned memory access ("access OK" variant): each helper
 * simply casts the pointer to the endian-annotated type and dereferences
 * it, letting the CPU handle any misalignment.  The le*/be* conversion
 * macros perform the byte swap relative to the host endianness.
 */
static __always_inline u16 get_unaligned_le16(const void *p)
{
	return le16_to_cpup((__le16 *)p);
}

static __always_inline u32 get_unaligned_le32(const void *p)
{
	return le32_to_cpup((__le32 *)p);
}

static __always_inline u64 get_unaligned_le64(const void *p)
{
	return le64_to_cpup((__le64 *)p);
}

static __always_inline u16 get_unaligned_be16(const void *p)
{
	return be16_to_cpup((__be16 *)p);
}

static __always_inline u32 get_unaligned_be32(const void *p)
{
	return be32_to_cpup((__be32 *)p);
}

static __always_inline u64 get_unaligned_be64(const void *p)
{
	return be64_to_cpup((__be64 *)p);
}

/* Store counterparts: convert the host-order value, then write in place. */
static __always_inline void put_unaligned_le16(u16 val, void *p)
{
	*((__le16 *)p) = cpu_to_le16(val);
}

static __always_inline void put_unaligned_le32(u32 val, void *p)
{
	*((__le32 *)p) = cpu_to_le32(val);
}

static __always_inline void put_unaligned_le64(u64 val, void *p)
{
	*((__le64 *)p) = cpu_to_le64(val);
}

static __always_inline void put_unaligned_be16(u16 val, void *p)
{
	*((__be16 *)p) = cpu_to_be16(val);
}

static __always_inline void put_unaligned_be32(u32 val, void *p)
{
	*((__be32 *)p) = cpu_to_be32(val);
}

static __always_inline void put_unaligned_be64(u64 val, void *p)
{
	*((__be64 *)p) = cpu_to_be64(val);
}
66
67 #endif /* _LINUX_UNALIGNED_ACCESS_OK_H */ 1 /*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
9 *
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
15 *
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
18 * conditions are met:
19 *
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer.
23 *
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * SOFTWARE.
37 */
38
39 #if !defined(IB_VERBS_H)
40 #define IB_VERBS_H
41
42 #include <linux/types.h>
43 #include <linux/device.h>
44 #include <linux/mm.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/kref.h>
47 #include <linux/list.h>
48 #include <linux/rwsem.h>
49 #include <linux/scatterlist.h>
50 #include <linux/workqueue.h>
51 #include <linux/socket.h>
52 #include <linux/irq_poll.h>
53 #include <uapi/linux/if_ether.h>
54 #include <net/ipv6.h>
55 #include <net/ip.h>
56 #include <linux/string.h>
57 #include <linux/slab.h>
58
59 #include <linux/if_link.h>
60 #include <linux/atomic.h>
61 #include <linux/mmu_notifier.h>
62 #include <asm/uaccess.h>
63
64 extern struct workqueue_struct *ib_wq;
65 extern struct workqueue_struct *ib_comp_wq;
66
67 union ib_gid {
68 u8 raw[16];
69 struct {
70 __be64 subnet_prefix;
71 __be64 interface_id;
72 } global;
73 };
74
75 extern union ib_gid zgid;
76
77 enum ib_gid_type {
78 /* If link layer is Ethernet, this is RoCE V1 */
79 IB_GID_TYPE_IB = 0,
80 IB_GID_TYPE_ROCE = 0,
81 IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
82 IB_GID_TYPE_SIZE
83 };
84
85 #define ROCE_V2_UDP_DPORT 4791
86 struct ib_gid_attr {
87 enum ib_gid_type gid_type;
88 struct net_device *ndev;
89 };
90
91 enum rdma_node_type {
92 /* IB values map to NodeInfo:NodeType. */
93 RDMA_NODE_IB_CA = 1,
94 RDMA_NODE_IB_SWITCH,
95 RDMA_NODE_IB_ROUTER,
96 RDMA_NODE_RNIC,
97 RDMA_NODE_USNIC,
98 RDMA_NODE_USNIC_UDP,
99 };
100
101 enum {
102 /* set the local administered indication */
103 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
104 };
105
106 enum rdma_transport_type {
107 RDMA_TRANSPORT_IB,
108 RDMA_TRANSPORT_IWARP,
109 RDMA_TRANSPORT_USNIC,
110 RDMA_TRANSPORT_USNIC_UDP
111 };
112
113 enum rdma_protocol_type {
114 RDMA_PROTOCOL_IB,
115 RDMA_PROTOCOL_IBOE,
116 RDMA_PROTOCOL_IWARP,
117 RDMA_PROTOCOL_USNIC_UDP
118 };
119
120 __attribute_const__ enum rdma_transport_type
121 rdma_node_get_transport(enum rdma_node_type node_type);
122
123 enum rdma_network_type {
124 RDMA_NETWORK_IB,
125 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
126 RDMA_NETWORK_IPV4,
127 RDMA_NETWORK_IPV6
128 };
129
130 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
131 {
132 if (network_type == RDMA_NETWORK_IPV4 ||
133 network_type == RDMA_NETWORK_IPV6)
134 return IB_GID_TYPE_ROCE_UDP_ENCAP;
135
136 /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
137 return IB_GID_TYPE_IB;
138 }
139
140 static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
141 union ib_gid *gid)
142 {
143 if (gid_type == IB_GID_TYPE_IB)
144 return RDMA_NETWORK_IB;
145
146 if (ipv6_addr_v4mapped((struct in6_addr *)gid))
147 return RDMA_NETWORK_IPV4;
148 else
149 return RDMA_NETWORK_IPV6;
150 }
151
152 enum rdma_link_layer {
153 IB_LINK_LAYER_UNSPECIFIED,
154 IB_LINK_LAYER_INFINIBAND,
155 IB_LINK_LAYER_ETHERNET,
156 };
157
158 enum ib_device_cap_flags {
159 IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
160 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
161 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
162 IB_DEVICE_RAW_MULTI = (1 << 3),
163 IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
164 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
165 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
166 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
167 IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
168 IB_DEVICE_INIT_TYPE = (1 << 9),
169 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
170 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
171 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
172 IB_DEVICE_SRQ_RESIZE = (1 << 13),
173 IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
174
175 /*
176 * This device supports a per-device lkey or stag that can be
177 * used without performing a memory registration for the local
178 * memory. Note that ULPs should never check this flag, but
179 * instead use the local_dma_lkey flag in the ib_pd structure,
180 * which will always contain a usable lkey.
181 */
182 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
183 IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16),
184 IB_DEVICE_MEM_WINDOW = (1 << 17),
185 /*
186 * Devices should set IB_DEVICE_UD_IP_SUM if they support
187 * insertion of UDP and TCP checksum on outgoing UD IPoIB
188 * messages and can verify the validity of checksum for
189 * incoming messages. Setting this flag implies that the
190 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
191 */
192 IB_DEVICE_UD_IP_CSUM = (1 << 18),
193 IB_DEVICE_UD_TSO = (1 << 19),
194 IB_DEVICE_XRC = (1 << 20),
195
196 /*
197 * This device supports the IB "base memory management extension",
198 * which includes support for fast registrations (IB_WR_REG_MR,
199 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
200 * also be set by any iWarp device which must support FRs to comply
201 * to the iWarp verbs spec. iWarp devices also support the
202 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
203 * stag.
204 */
205 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
206 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
207 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
208 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
209 IB_DEVICE_RC_IP_CSUM = (1 << 25),
210 IB_DEVICE_RAW_IP_CSUM = (1 << 26),
211 /*
212 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
213 * support execution of WQEs that involve synchronization
214 * of I/O operations with single completion queue managed
215 * by hardware.
216 */
217 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
218 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
219 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
220 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
221 IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
222 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
223 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
224 };
225
226 enum ib_signature_prot_cap {
227 IB_PROT_T10DIF_TYPE_1 = 1,
228 IB_PROT_T10DIF_TYPE_2 = 1 << 1,
229 IB_PROT_T10DIF_TYPE_3 = 1 << 2,
230 };
231
232 enum ib_signature_guard_cap {
233 IB_GUARD_T10DIF_CRC = 1,
234 IB_GUARD_T10DIF_CSUM = 1 << 1,
235 };
236
237 enum ib_atomic_cap {
238 IB_ATOMIC_NONE,
239 IB_ATOMIC_HCA,
240 IB_ATOMIC_GLOB
241 };
242
243 enum ib_odp_general_cap_bits {
244 IB_ODP_SUPPORT = 1 << 0,
245 };
246
247 enum ib_odp_transport_cap_bits {
248 IB_ODP_SUPPORT_SEND = 1 << 0,
249 IB_ODP_SUPPORT_RECV = 1 << 1,
250 IB_ODP_SUPPORT_WRITE = 1 << 2,
251 IB_ODP_SUPPORT_READ = 1 << 3,
252 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
253 };
254
255 struct ib_odp_caps {
256 uint64_t general_caps;
257 struct {
258 uint32_t rc_odp_caps;
259 uint32_t uc_odp_caps;
260 uint32_t ud_odp_caps;
261 } per_transport_caps;
262 };
263
264 enum ib_cq_creation_flags {
265 IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
266 IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
267 };
268
269 struct ib_cq_init_attr {
270 unsigned int cqe;
271 int comp_vector;
272 u32 flags;
273 };
274
275 struct ib_device_attr {
276 u64 fw_ver;
277 __be64 sys_image_guid;
278 u64 max_mr_size;
279 u64 page_size_cap;
280 u32 vendor_id;
281 u32 vendor_part_id;
282 u32 hw_ver;
283 int max_qp;
284 int max_qp_wr;
285 u64 device_cap_flags;
286 int max_sge;
287 int max_sge_rd;
288 int max_cq;
289 int max_cqe;
290 int max_mr;
291 int max_pd;
292 int max_qp_rd_atom;
293 int max_ee_rd_atom;
294 int max_res_rd_atom;
295 int max_qp_init_rd_atom;
296 int max_ee_init_rd_atom;
297 enum ib_atomic_cap atomic_cap;
298 enum ib_atomic_cap masked_atomic_cap;
299 int max_ee;
300 int max_rdd;
301 int max_mw;
302 int max_raw_ipv6_qp;
303 int max_raw_ethy_qp;
304 int max_mcast_grp;
305 int max_mcast_qp_attach;
306 int max_total_mcast_qp_attach;
307 int max_ah;
308 int max_fmr;
309 int max_map_per_fmr;
310 int max_srq;
311 int max_srq_wr;
312 int max_srq_sge;
313 unsigned int max_fast_reg_page_list_len;
314 u16 max_pkeys;
315 u8 local_ca_ack_delay;
316 int sig_prot_cap;
317 int sig_guard_cap;
318 struct ib_odp_caps odp_caps;
319 uint64_t timestamp_mask;
320 uint64_t hca_core_clock; /* in KHZ */
321 };
322
/* IBA path MTU encodings (IBA spec: 1..5 for 256..4096 bytes). */
enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

/*
 * Convert an ib_mtu encoding to the MTU in bytes, or -1 for an invalid
 * encoding.  The encodings are consecutive powers of two starting at 256,
 * so a shift replaces the per-value switch.
 */
static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	if (mtu >= IB_MTU_256 && mtu <= IB_MTU_4096)
		return 256 << (mtu - IB_MTU_256);
	return -1;
}
342
343 enum ib_port_state {
344 IB_PORT_NOP = 0,
345 IB_PORT_DOWN = 1,
346 IB_PORT_INIT = 2,
347 IB_PORT_ARMED = 3,
348 IB_PORT_ACTIVE = 4,
349 IB_PORT_ACTIVE_DEFER = 5
350 };
351
352 enum ib_port_cap_flags {
353 IB_PORT_SM = 1 << 1,
354 IB_PORT_NOTICE_SUP = 1 << 2,
355 IB_PORT_TRAP_SUP = 1 << 3,
356 IB_PORT_OPT_IPD_SUP = 1 << 4,
357 IB_PORT_AUTO_MIGR_SUP = 1 << 5,
358 IB_PORT_SL_MAP_SUP = 1 << 6,
359 IB_PORT_MKEY_NVRAM = 1 << 7,
360 IB_PORT_PKEY_NVRAM = 1 << 8,
361 IB_PORT_LED_INFO_SUP = 1 << 9,
362 IB_PORT_SM_DISABLED = 1 << 10,
363 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
364 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
365 IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
366 IB_PORT_CM_SUP = 1 << 16,
367 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
368 IB_PORT_REINIT_SUP = 1 << 18,
369 IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
370 IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
371 IB_PORT_DR_NOTICE_SUP = 1 << 21,
372 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
373 IB_PORT_BOOT_MGMT_SUP = 1 << 23,
374 IB_PORT_LINK_LATENCY_SUP = 1 << 24,
375 IB_PORT_CLIENT_REG_SUP = 1 << 25,
376 IB_PORT_IP_BASED_GIDS = 1 << 26,
377 };
378
/* Port link-width encodings (bitmask values, per PortInfo:LinkWidth). */
enum ib_port_width {
	IB_WIDTH_1X  = 1,
	IB_WIDTH_4X  = 2,
	IB_WIDTH_8X  = 4,
	IB_WIDTH_12X = 8
};

/* Convert a link-width encoding to the lane count, or -1 if invalid. */
static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	if (width == IB_WIDTH_1X)
		return 1;
	if (width == IB_WIDTH_4X)
		return 4;
	if (width == IB_WIDTH_8X)
		return 8;
	if (width == IB_WIDTH_12X)
		return 12;
	return -1;
}
396
397 enum ib_port_speed {
398 IB_SPEED_SDR = 1,
399 IB_SPEED_DDR = 2,
400 IB_SPEED_QDR = 4,
401 IB_SPEED_FDR10 = 8,
402 IB_SPEED_FDR = 16,
403 IB_SPEED_EDR = 32
404 };
405
406 /**
407 * struct rdma_hw_stats
408 * @timestamp - Used by the core code to track when the last update was
409 * @lifespan - Used by the core code to determine how old the counters
410 * should be before being updated again. Stored in jiffies, defaults
411 * to 10 milliseconds, drivers can override the default by specifying
412 * their own value during their allocation routine.
413 * @names - Array of pointers to static names used for the counters in the
414 * directory.
415 * @num_counters - How many hardware counters there are. If names is
416 * shorter than this number, a kernel oops will result. Driver authors
417 * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
418 * in their code to prevent this.
419 * @value - Array of u64 counters that are accessed by the sysfs code and
420 * filled in by the drivers get_stats routine
421 */
422 struct rdma_hw_stats {
423 unsigned long timestamp;
424 unsigned long lifespan;
425 const char * const *names;
426 int num_counters;
427 u64 value[];
428 };
429
430 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
431 /**
432 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
433 * for drivers.
434 * @names - Array of static const char *
435 * @num_counters - How many elements in array
436 * @lifespan - How many milliseconds between updates
437 */
438 static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
439 const char * const *names, int num_counters,
440 unsigned long lifespan)
441 {
442 struct rdma_hw_stats *stats;
443
444 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
445 GFP_KERNEL);
446 if (!stats)
447 return NULL;
448 stats->names = names;
449 stats->num_counters = num_counters;
450 stats->lifespan = msecs_to_jiffies(lifespan);
451
452 return stats;
453 }
454
455
456 /* Define bits for the various functionality this port needs to be supported by
457 * the core.
458 */
459 /* Management 0x00000FFF */
460 #define RDMA_CORE_CAP_IB_MAD 0x00000001
461 #define RDMA_CORE_CAP_IB_SMI 0x00000002
462 #define RDMA_CORE_CAP_IB_CM 0x00000004
463 #define RDMA_CORE_CAP_IW_CM 0x00000008
464 #define RDMA_CORE_CAP_IB_SA 0x00000010
465 #define RDMA_CORE_CAP_OPA_MAD 0x00000020
466
467 /* Address format 0x000FF000 */
468 #define RDMA_CORE_CAP_AF_IB 0x00001000
469 #define RDMA_CORE_CAP_ETH_AH 0x00002000
470
471 /* Protocol 0xFFF00000 */
472 #define RDMA_CORE_CAP_PROT_IB 0x00100000
473 #define RDMA_CORE_CAP_PROT_ROCE 0x00200000
474 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000
475 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
476
477 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
478 | RDMA_CORE_CAP_IB_MAD \
479 | RDMA_CORE_CAP_IB_SMI \
480 | RDMA_CORE_CAP_IB_CM \
481 | RDMA_CORE_CAP_IB_SA \
482 | RDMA_CORE_CAP_AF_IB)
483 #define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
484 | RDMA_CORE_CAP_IB_MAD \
485 | RDMA_CORE_CAP_IB_CM \
486 | RDMA_CORE_CAP_AF_IB \
487 | RDMA_CORE_CAP_ETH_AH)
488 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
489 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
490 | RDMA_CORE_CAP_IB_MAD \
491 | RDMA_CORE_CAP_IB_CM \
492 | RDMA_CORE_CAP_AF_IB \
493 | RDMA_CORE_CAP_ETH_AH)
494 #define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
495 | RDMA_CORE_CAP_IW_CM)
496 #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
497 | RDMA_CORE_CAP_OPA_MAD)
498
499 struct ib_port_attr {
500 u64 subnet_prefix;
501 enum ib_port_state state;
502 enum ib_mtu max_mtu;
503 enum ib_mtu active_mtu;
504 int gid_tbl_len;
505 u32 port_cap_flags;
506 u32 max_msg_sz;
507 u32 bad_pkey_cntr;
508 u32 qkey_viol_cntr;
509 u16 pkey_tbl_len;
510 u16 lid;
511 u16 sm_lid;
512 u8 lmc;
513 u8 max_vl_num;
514 u8 sm_sl;
515 u8 subnet_timeout;
516 u8 init_type_reply;
517 u8 active_width;
518 u8 active_speed;
519 u8 phys_state;
520 bool grh_required;
521 };
522
523 enum ib_device_modify_flags {
524 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
525 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
526 };
527
528 struct ib_device_modify {
529 u64 sys_image_guid;
530 char node_desc[64];
531 };
532
533 enum ib_port_modify_flags {
534 IB_PORT_SHUTDOWN = 1,
535 IB_PORT_INIT_TYPE = (1<<2),
536 IB_PORT_RESET_QKEY_CNTR = (1<<3)
537 };
538
539 struct ib_port_modify {
540 u32 set_port_cap_mask;
541 u32 clr_port_cap_mask;
542 u8 init_type;
543 };
544
545 enum ib_event_type {
546 IB_EVENT_CQ_ERR,
547 IB_EVENT_QP_FATAL,
548 IB_EVENT_QP_REQ_ERR,
549 IB_EVENT_QP_ACCESS_ERR,
550 IB_EVENT_COMM_EST,
551 IB_EVENT_SQ_DRAINED,
552 IB_EVENT_PATH_MIG,
553 IB_EVENT_PATH_MIG_ERR,
554 IB_EVENT_DEVICE_FATAL,
555 IB_EVENT_PORT_ACTIVE,
556 IB_EVENT_PORT_ERR,
557 IB_EVENT_LID_CHANGE,
558 IB_EVENT_PKEY_CHANGE,
559 IB_EVENT_SM_CHANGE,
560 IB_EVENT_SRQ_ERR,
561 IB_EVENT_SRQ_LIMIT_REACHED,
562 IB_EVENT_QP_LAST_WQE_REACHED,
563 IB_EVENT_CLIENT_REREGISTER,
564 IB_EVENT_GID_CHANGE,
565 IB_EVENT_WQ_FATAL,
566 };
567
568 const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
569
570 struct ib_event {
571 struct ib_device *device;
572 union {
573 struct ib_cq *cq;
574 struct ib_qp *qp;
575 struct ib_srq *srq;
576 struct ib_wq *wq;
577 u8 port_num;
578 } element;
579 enum ib_event_type event;
580 };
581
582 struct ib_event_handler {
583 struct ib_device *device;
584 void (*handler)(struct ib_event_handler *, struct ib_event *);
585 struct list_head list;
586 };
587
588 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
589 do { \
590 (_ptr)->device = _device; \
591 (_ptr)->handler = _handler; \
592 INIT_LIST_HEAD(&(_ptr)->list); \
593 } while (0)
594
595 struct ib_global_route {
596 union ib_gid dgid;
597 u32 flow_label;
598 u8 sgid_index;
599 u8 hop_limit;
600 u8 traffic_class;
601 };
602
603 struct ib_grh {
604 __be32 version_tclass_flow;
605 __be16 paylen;
606 u8 next_hdr;
607 u8 hop_limit;
608 union ib_gid sgid;
609 union ib_gid dgid;
610 };
611
612 union rdma_network_hdr {
613 struct ib_grh ibgrh;
614 struct {
615 /* The IB spec states that if it's IPv4, the header
616 * is located in the last 20 bytes of the header.
617 */
618 u8 reserved[20];
619 struct iphdr roce4grh;
620 };
621 };
622
623 enum {
624 IB_MULTICAST_QPN = 0xffffff
625 };
626
627 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
628 #define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
629
630 enum ib_ah_flags {
631 IB_AH_GRH = 1
632 };
633
634 enum ib_rate {
635 IB_RATE_PORT_CURRENT = 0,
636 IB_RATE_2_5_GBPS = 2,
637 IB_RATE_5_GBPS = 5,
638 IB_RATE_10_GBPS = 3,
639 IB_RATE_20_GBPS = 6,
640 IB_RATE_30_GBPS = 4,
641 IB_RATE_40_GBPS = 7,
642 IB_RATE_60_GBPS = 8,
643 IB_RATE_80_GBPS = 9,
644 IB_RATE_120_GBPS = 10,
645 IB_RATE_14_GBPS = 11,
646 IB_RATE_56_GBPS = 12,
647 IB_RATE_112_GBPS = 13,
648 IB_RATE_168_GBPS = 14,
649 IB_RATE_25_GBPS = 15,
650 IB_RATE_100_GBPS = 16,
651 IB_RATE_200_GBPS = 17,
652 IB_RATE_300_GBPS = 18
653 };
654
655 /**
656 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
657 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
658 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
659 * @rate: rate to convert.
660 */
661 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
662
663 /**
664 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
665 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
666 * @rate: rate to convert.
667 */
668 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
669
670
671 /**
672 * enum ib_mr_type - memory region type
673 * @IB_MR_TYPE_MEM_REG: memory region that is used for
674 * normal registration
675 * @IB_MR_TYPE_SIGNATURE: memory region that is used for
676 * signature operations (data-integrity
677 * capable regions)
678 * @IB_MR_TYPE_SG_GAPS: memory region that is capable to
679 * register any arbitrary sg lists (without
680 * the normal mr constraints - see
681 * ib_map_mr_sg)
682 */
683 enum ib_mr_type {
684 IB_MR_TYPE_MEM_REG,
685 IB_MR_TYPE_SIGNATURE,
686 IB_MR_TYPE_SG_GAPS,
687 };
688
689 /**
690 * Signature types
691 * IB_SIG_TYPE_NONE: Unprotected.
692 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
693 */
694 enum ib_signature_type {
695 IB_SIG_TYPE_NONE,
696 IB_SIG_TYPE_T10_DIF,
697 };
698
699 /**
700 * Signature T10-DIF block-guard types
701 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
702 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
703 */
704 enum ib_t10_dif_bg_type {
705 IB_T10DIF_CRC,
706 IB_T10DIF_CSUM
707 };
708
709 /**
710 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
711 * domain.
712 * @bg_type: T10-DIF block guard type (CRC|CSUM)
713 * @pi_interval: protection information interval.
714 * @bg: seed of guard computation.
715 * @app_tag: application tag of guard block
716 * @ref_tag: initial guard block reference tag.
717 * @ref_remap: Indicate whether the reftag increments each block
718 * @app_escape: Indicate to skip block check if apptag=0xffff
719 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
720 * @apptag_check_mask: check bitmask of application tag.
721 */
722 struct ib_t10_dif_domain {
723 enum ib_t10_dif_bg_type bg_type;
724 u16 pi_interval;
725 u16 bg;
726 u16 app_tag;
727 u32 ref_tag;
728 bool ref_remap;
729 bool app_escape;
730 bool ref_escape;
731 u16 apptag_check_mask;
732 };
733
734 /**
735 * struct ib_sig_domain - Parameters for signature domain
736 * @sig_type: specific signature type
737 * @sig: union of all signature domain attributes that may
738 * be used to set domain layout.
739 */
740 struct ib_sig_domain {
741 enum ib_signature_type sig_type;
742 union {
743 struct ib_t10_dif_domain dif;
744 } sig;
745 };
746
747 /**
748 * struct ib_sig_attrs - Parameters for signature handover operation
749 * @check_mask: bitmask for signature byte check (8 bytes)
750 * @mem: memory domain layout descriptor.
751 * @wire: wire domain layout descriptor.
752 */
753 struct ib_sig_attrs {
754 u8 check_mask;
755 struct ib_sig_domain mem;
756 struct ib_sig_domain wire;
757 };
758
759 enum ib_sig_err_type {
760 IB_SIG_BAD_GUARD,
761 IB_SIG_BAD_REFTAG,
762 IB_SIG_BAD_APPTAG,
763 };
764
765 /**
766 * struct ib_sig_err - signature error descriptor
767 */
768 struct ib_sig_err {
769 enum ib_sig_err_type err_type;
770 u32 expected;
771 u32 actual;
772 u64 sig_err_offset;
773 u32 key;
774 };
775
776 enum ib_mr_status_check {
777 IB_MR_CHECK_SIG_STATUS = 1,
778 };
779
780 /**
781 * struct ib_mr_status - Memory region status container
782 *
783 * @fail_status: Bitmask of MR checks status. For each
784 * failed check a corresponding status bit is set.
785 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
786 * failure.
787 */
788 struct ib_mr_status {
789 u32 fail_status;
790 struct ib_sig_err sig_err;
791 };
792
793 /**
794 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
795 * enum.
796 * @mult: multiple to convert.
797 */
798 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
799
800 struct ib_ah_attr {
801 struct ib_global_route grh;
802 u16 dlid;
803 u8 sl;
804 u8 src_path_bits;
805 u8 static_rate;
806 u8 ah_flags;
807 u8 port_num;
808 u8 dmac[ETH_ALEN];
809 };
810
811 enum ib_wc_status {
812 IB_WC_SUCCESS,
813 IB_WC_LOC_LEN_ERR,
814 IB_WC_LOC_QP_OP_ERR,
815 IB_WC_LOC_EEC_OP_ERR,
816 IB_WC_LOC_PROT_ERR,
817 IB_WC_WR_FLUSH_ERR,
818 IB_WC_MW_BIND_ERR,
819 IB_WC_BAD_RESP_ERR,
820 IB_WC_LOC_ACCESS_ERR,
821 IB_WC_REM_INV_REQ_ERR,
822 IB_WC_REM_ACCESS_ERR,
823 IB_WC_REM_OP_ERR,
824 IB_WC_RETRY_EXC_ERR,
825 IB_WC_RNR_RETRY_EXC_ERR,
826 IB_WC_LOC_RDD_VIOL_ERR,
827 IB_WC_REM_INV_RD_REQ_ERR,
828 IB_WC_REM_ABORT_ERR,
829 IB_WC_INV_EECN_ERR,
830 IB_WC_INV_EEC_STATE_ERR,
831 IB_WC_FATAL_ERR,
832 IB_WC_RESP_TIMEOUT_ERR,
833 IB_WC_GENERAL_ERR
834 };
835
836 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
837
838 enum ib_wc_opcode {
839 IB_WC_SEND,
840 IB_WC_RDMA_WRITE,
841 IB_WC_RDMA_READ,
842 IB_WC_COMP_SWAP,
843 IB_WC_FETCH_ADD,
844 IB_WC_LSO,
845 IB_WC_LOCAL_INV,
846 IB_WC_REG_MR,
847 IB_WC_MASKED_COMP_SWAP,
848 IB_WC_MASKED_FETCH_ADD,
849 /*
850 * Set value of IB_WC_RECV so consumers can test if a completion is a
851 * receive by testing (opcode & IB_WC_RECV).
852 */
853 IB_WC_RECV = 1 << 7,
854 IB_WC_RECV_RDMA_WITH_IMM
855 };
856
857 enum ib_wc_flags {
858 IB_WC_GRH = 1,
859 IB_WC_WITH_IMM = (1<<1),
860 IB_WC_WITH_INVALIDATE = (1<<2),
861 IB_WC_IP_CSUM_OK = (1<<3),
862 IB_WC_WITH_SMAC = (1<<4),
863 IB_WC_WITH_VLAN = (1<<5),
864 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
865 };
866
867 struct ib_wc {
868 union {
869 u64 wr_id;
870 struct ib_cqe *wr_cqe;
871 };
872 enum ib_wc_status status;
873 enum ib_wc_opcode opcode;
874 u32 vendor_err;
875 u32 byte_len;
876 struct ib_qp *qp;
877 union {
878 __be32 imm_data;
879 u32 invalidate_rkey;
880 } ex;
881 u32 src_qp;
882 int wc_flags;
883 u16 pkey_index;
884 u16 slid;
885 u8 sl;
886 u8 dlid_path_bits;
887 u8 port_num; /* valid only for DR SMPs on switches */
888 u8 smac[ETH_ALEN];
889 u16 vlan_id;
890 u8 network_hdr_type;
891 };
892
893 enum ib_cq_notify_flags {
894 IB_CQ_SOLICITED = 1 << 0,
895 IB_CQ_NEXT_COMP = 1 << 1,
896 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
897 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
898 };
899
900 enum ib_srq_type {
901 IB_SRQT_BASIC,
902 IB_SRQT_XRC
903 };
904
905 enum ib_srq_attr_mask {
906 IB_SRQ_MAX_WR = 1 << 0,
907 IB_SRQ_LIMIT = 1 << 1,
908 };
909
910 struct ib_srq_attr {
911 u32 max_wr;
912 u32 max_sge;
913 u32 srq_limit;
914 };
915
916 struct ib_srq_init_attr {
917 void (*event_handler)(struct ib_event *, void *);
918 void *srq_context;
919 struct ib_srq_attr attr;
920 enum ib_srq_type srq_type;
921
922 union {
923 struct {
924 struct ib_xrcd *xrcd;
925 struct ib_cq *cq;
926 } xrc;
927 } ext;
928 };
929
930 struct ib_qp_cap {
931 u32 max_send_wr;
932 u32 max_recv_wr;
933 u32 max_send_sge;
934 u32 max_recv_sge;
935 u32 max_inline_data;
936
937 /*
938 * Maximum number of rdma_rw_ctx structures in flight at a time.
939 * ib_create_qp() will calculate the right amount of neededed WRs
940 * and MRs based on this.
941 */
942 u32 max_rdma_ctxs;
943 };
944
945 enum ib_sig_type {
946 IB_SIGNAL_ALL_WR,
947 IB_SIGNAL_REQ_WR
948 };
949
950 enum ib_qp_type {
951 /*
952 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
953 * here (and in that order) since the MAD layer uses them as
954 * indices into a 2-entry table.
955 */
956 IB_QPT_SMI,
957 IB_QPT_GSI,
958
959 IB_QPT_RC,
960 IB_QPT_UC,
961 IB_QPT_UD,
962 IB_QPT_RAW_IPV6,
963 IB_QPT_RAW_ETHERTYPE,
964 IB_QPT_RAW_PACKET = 8,
965 IB_QPT_XRC_INI = 9,
966 IB_QPT_XRC_TGT,
967 IB_QPT_MAX,
968 /* Reserve a range for qp types internal to the low level driver.
969 * These qp types will not be visible at the IB core layer, so the
970 * IB_QPT_MAX usages should not be affected in the core layer
971 */
972 IB_QPT_RESERVED1 = 0x1000,
973 IB_QPT_RESERVED2,
974 IB_QPT_RESERVED3,
975 IB_QPT_RESERVED4,
976 IB_QPT_RESERVED5,
977 IB_QPT_RESERVED6,
978 IB_QPT_RESERVED7,
979 IB_QPT_RESERVED8,
980 IB_QPT_RESERVED9,
981 IB_QPT_RESERVED10,
982 };
983
984 enum ib_qp_create_flags {
985 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
986 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
987 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
988 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
989 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
990 IB_QP_CREATE_NETIF_QP = 1 << 5,
991 IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
992 IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
993 IB_QP_CREATE_SCATTER_FCS = 1 << 8,
994 /* reserve bits 26-31 for low level drivers' internal use */
995 IB_QP_CREATE_RESERVED_START = 1 << 26,
996 IB_QP_CREATE_RESERVED_END = 1 << 31,
997 };
998
999 /*
1000 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1001 * callback to destroy the passed in QP.
1002 */
1003
1004 struct ib_qp_init_attr {
1005 void (*event_handler)(struct ib_event *, void *);
1006 void *qp_context;
1007 struct ib_cq *send_cq;
1008 struct ib_cq *recv_cq;
1009 struct ib_srq *srq;
1010 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
1011 struct ib_qp_cap cap;
1012 enum ib_sig_type sq_sig_type;
1013 enum ib_qp_type qp_type;
1014 enum ib_qp_create_flags create_flags;
1015
1016 /*
1017 * Only needed for special QP types, or when using the RW API.
1018 */
1019 u8 port_num;
1020 struct ib_rwq_ind_table *rwq_ind_tbl;
1021 };
1022
1023 struct ib_qp_open_attr {
1024 void (*event_handler)(struct ib_event *, void *);
1025 void *qp_context;
1026 u32 qp_num;
1027 enum ib_qp_type qp_type;
1028 };
1029
1030 enum ib_rnr_timeout {
1031 IB_RNR_TIMER_655_36 = 0,
1032 IB_RNR_TIMER_000_01 = 1,
1033 IB_RNR_TIMER_000_02 = 2,
1034 IB_RNR_TIMER_000_03 = 3,
1035 IB_RNR_TIMER_000_04 = 4,
1036 IB_RNR_TIMER_000_06 = 5,
1037 IB_RNR_TIMER_000_08 = 6,
1038 IB_RNR_TIMER_000_12 = 7,
1039 IB_RNR_TIMER_000_16 = 8,
1040 IB_RNR_TIMER_000_24 = 9,
1041 IB_RNR_TIMER_000_32 = 10,
1042 IB_RNR_TIMER_000_48 = 11,
1043 IB_RNR_TIMER_000_64 = 12,
1044 IB_RNR_TIMER_000_96 = 13,
1045 IB_RNR_TIMER_001_28 = 14,
1046 IB_RNR_TIMER_001_92 = 15,
1047 IB_RNR_TIMER_002_56 = 16,
1048 IB_RNR_TIMER_003_84 = 17,
1049 IB_RNR_TIMER_005_12 = 18,
1050 IB_RNR_TIMER_007_68 = 19,
1051 IB_RNR_TIMER_010_24 = 20,
1052 IB_RNR_TIMER_015_36 = 21,
1053 IB_RNR_TIMER_020_48 = 22,
1054 IB_RNR_TIMER_030_72 = 23,
1055 IB_RNR_TIMER_040_96 = 24,
1056 IB_RNR_TIMER_061_44 = 25,
1057 IB_RNR_TIMER_081_92 = 26,
1058 IB_RNR_TIMER_122_88 = 27,
1059 IB_RNR_TIMER_163_84 = 28,
1060 IB_RNR_TIMER_245_76 = 29,
1061 IB_RNR_TIMER_327_68 = 30,
1062 IB_RNR_TIMER_491_52 = 31
1063 };
1064
1065 enum ib_qp_attr_mask {
1066 IB_QP_STATE = 1,
1067 IB_QP_CUR_STATE = (1<<1),
1068 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1069 IB_QP_ACCESS_FLAGS = (1<<3),
1070 IB_QP_PKEY_INDEX = (1<<4),
1071 IB_QP_PORT = (1<<5),
1072 IB_QP_QKEY = (1<<6),
1073 IB_QP_AV = (1<<7),
1074 IB_QP_PATH_MTU = (1<<8),
1075 IB_QP_TIMEOUT = (1<<9),
1076 IB_QP_RETRY_CNT = (1<<10),
1077 IB_QP_RNR_RETRY = (1<<11),
1078 IB_QP_RQ_PSN = (1<<12),
1079 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1080 IB_QP_ALT_PATH = (1<<14),
1081 IB_QP_MIN_RNR_TIMER = (1<<15),
1082 IB_QP_SQ_PSN = (1<<16),
1083 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1084 IB_QP_PATH_MIG_STATE = (1<<18),
1085 IB_QP_CAP = (1<<19),
1086 IB_QP_DEST_QPN = (1<<20),
1087 IB_QP_RESERVED1 = (1<<21),
1088 IB_QP_RESERVED2 = (1<<22),
1089 IB_QP_RESERVED3 = (1<<23),
1090 IB_QP_RESERVED4 = (1<<24),
1091 };
1092
1093 enum ib_qp_state {
1094 IB_QPS_RESET,
1095 IB_QPS_INIT,
1096 IB_QPS_RTR,
1097 IB_QPS_RTS,
1098 IB_QPS_SQD,
1099 IB_QPS_SQE,
1100 IB_QPS_ERR
1101 };
1102
1103 enum ib_mig_state {
1104 IB_MIG_MIGRATED,
1105 IB_MIG_REARM,
1106 IB_MIG_ARMED
1107 };
1108
1109 enum ib_mw_type {
1110 IB_MW_TYPE_1 = 1,
1111 IB_MW_TYPE_2 = 2
1112 };
1113
1114 struct ib_qp_attr {
1115 enum ib_qp_state qp_state;
1116 enum ib_qp_state cur_qp_state;
1117 enum ib_mtu path_mtu;
1118 enum ib_mig_state path_mig_state;
1119 u32 qkey;
1120 u32 rq_psn;
1121 u32 sq_psn;
1122 u32 dest_qp_num;
1123 int qp_access_flags;
1124 struct ib_qp_cap cap;
1125 struct ib_ah_attr ah_attr;
1126 struct ib_ah_attr alt_ah_attr;
1127 u16 pkey_index;
1128 u16 alt_pkey_index;
1129 u8 en_sqd_async_notify;
1130 u8 sq_draining;
1131 u8 max_rd_atomic;
1132 u8 max_dest_rd_atomic;
1133 u8 min_rnr_timer;
1134 u8 port_num;
1135 u8 timeout;
1136 u8 retry_cnt;
1137 u8 rnr_retry;
1138 u8 alt_port_num;
1139 u8 alt_timeout;
1140 };
1141
1142 enum ib_wr_opcode {
1143 IB_WR_RDMA_WRITE,
1144 IB_WR_RDMA_WRITE_WITH_IMM,
1145 IB_WR_SEND,
1146 IB_WR_SEND_WITH_IMM,
1147 IB_WR_RDMA_READ,
1148 IB_WR_ATOMIC_CMP_AND_SWP,
1149 IB_WR_ATOMIC_FETCH_AND_ADD,
1150 IB_WR_LSO,
1151 IB_WR_SEND_WITH_INV,
1152 IB_WR_RDMA_READ_WITH_INV,
1153 IB_WR_LOCAL_INV,
1154 IB_WR_REG_MR,
1155 IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
1156 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1157 IB_WR_REG_SIG_MR,
1158 /* reserve values for low level drivers' internal use.
1159 * These values will not be used at all in the ib core layer.
1160 */
1161 IB_WR_RESERVED1 = 0xf0,
1162 IB_WR_RESERVED2,
1163 IB_WR_RESERVED3,
1164 IB_WR_RESERVED4,
1165 IB_WR_RESERVED5,
1166 IB_WR_RESERVED6,
1167 IB_WR_RESERVED7,
1168 IB_WR_RESERVED8,
1169 IB_WR_RESERVED9,
1170 IB_WR_RESERVED10,
1171 };
1172
1173 enum ib_send_flags {
1174 IB_SEND_FENCE = 1,
1175 IB_SEND_SIGNALED = (1<<1),
1176 IB_SEND_SOLICITED = (1<<2),
1177 IB_SEND_INLINE = (1<<3),
1178 IB_SEND_IP_CSUM = (1<<4),
1179
1180 /* reserve bits 26-31 for low level drivers' internal use */
1181 IB_SEND_RESERVED_START = (1 << 26),
1182 IB_SEND_RESERVED_END = (1 << 31),
1183 };
1184
1185 struct ib_sge {
1186 u64 addr;
1187 u32 length;
1188 u32 lkey;
1189 };
1190
1191 struct ib_cqe {
1192 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1193 };
1194
1195 struct ib_send_wr {
1196 struct ib_send_wr *next;
1197 union {
1198 u64 wr_id;
1199 struct ib_cqe *wr_cqe;
1200 };
1201 struct ib_sge *sg_list;
1202 int num_sge;
1203 enum ib_wr_opcode opcode;
1204 int send_flags;
1205 union {
1206 __be32 imm_data;
1207 u32 invalidate_rkey;
1208 } ex;
1209 };
1210
1211 struct ib_rdma_wr {
1212 struct ib_send_wr wr;
1213 u64 remote_addr;
1214 u32 rkey;
1215 };
1216
1217 static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
1218 {
1219 return container_of(wr, struct ib_rdma_wr, wr);
1220 }
1221
1222 struct ib_atomic_wr {
1223 struct ib_send_wr wr;
1224 u64 remote_addr;
1225 u64 compare_add;
1226 u64 swap;
1227 u64 compare_add_mask;
1228 u64 swap_mask;
1229 u32 rkey;
1230 };
1231
1232 static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
1233 {
1234 return container_of(wr, struct ib_atomic_wr, wr);
1235 }
1236
1237 struct ib_ud_wr {
1238 struct ib_send_wr wr;
1239 struct ib_ah *ah;
1240 void *header;
1241 int hlen;
1242 int mss;
1243 u32 remote_qpn;
1244 u32 remote_qkey;
1245 u16 pkey_index; /* valid for GSI only */
1246 u8 port_num; /* valid for DR SMPs on switch only */
1247 };
1248
1249 static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
1250 {
1251 return container_of(wr, struct ib_ud_wr, wr);
1252 }
1253
1254 struct ib_reg_wr {
1255 struct ib_send_wr wr;
1256 struct ib_mr *mr;
1257 u32 key;
1258 int access;
1259 };
1260
1261 static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
1262 {
1263 return container_of(wr, struct ib_reg_wr, wr);
1264 }
1265
1266 struct ib_sig_handover_wr {
1267 struct ib_send_wr wr;
1268 struct ib_sig_attrs *sig_attrs;
1269 struct ib_mr *sig_mr;
1270 int access_flags;
1271 struct ib_sge *prot;
1272 };
1273
1274 static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
1275 {
1276 return container_of(wr, struct ib_sig_handover_wr, wr);
1277 }
1278
1279 struct ib_recv_wr {
1280 struct ib_recv_wr *next;
1281 union {
1282 u64 wr_id;
1283 struct ib_cqe *wr_cqe;
1284 };
1285 struct ib_sge *sg_list;
1286 int num_sge;
1287 };
1288
1289 enum ib_access_flags {
1290 IB_ACCESS_LOCAL_WRITE = 1,
1291 IB_ACCESS_REMOTE_WRITE = (1<<1),
1292 IB_ACCESS_REMOTE_READ = (1<<2),
1293 IB_ACCESS_REMOTE_ATOMIC = (1<<3),
1294 IB_ACCESS_MW_BIND = (1<<4),
1295 IB_ZERO_BASED = (1<<5),
1296 IB_ACCESS_ON_DEMAND = (1<<6),
1297 };
1298
1299 /*
1300 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1301 * are hidden here instead of a uapi header!
1302 */
1303 enum ib_mr_rereg_flags {
1304 IB_MR_REREG_TRANS = 1,
1305 IB_MR_REREG_PD = (1<<1),
1306 IB_MR_REREG_ACCESS = (1<<2),
1307 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1308 };
1309
1310 struct ib_fmr_attr {
1311 int max_pages;
1312 int max_maps;
1313 u8 page_shift;
1314 };
1315
1316 struct ib_umem;
1317
1318 struct ib_ucontext {
1319 struct ib_device *device;
1320 struct list_head pd_list;
1321 struct list_head mr_list;
1322 struct list_head mw_list;
1323 struct list_head cq_list;
1324 struct list_head qp_list;
1325 struct list_head srq_list;
1326 struct list_head ah_list;
1327 struct list_head xrcd_list;
1328 struct list_head rule_list;
1329 struct list_head wq_list;
1330 struct list_head rwq_ind_tbl_list;
1331 int closing;
1332
1333 struct pid *tgid;
1334 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1335 struct rb_root umem_tree;
1336 /*
1337 * Protects .umem_rbroot and tree, as well as odp_mrs_count and
1338 * mmu notifiers registration.
1339 */
1340 struct rw_semaphore umem_rwsem;
1341 void (*invalidate_range)(struct ib_umem *umem,
1342 unsigned long start, unsigned long end);
1343
1344 struct mmu_notifier mn;
1345 atomic_t notifier_count;
1346 /* A list of umems that don't have private mmu notifier counters yet. */
1347 struct list_head no_private_counters;
1348 int odp_mrs_count;
1349 #endif
1350 };
1351
1352 struct ib_uobject {
1353 u64 user_handle; /* handle given to us by userspace */
1354 struct ib_ucontext *context; /* associated user context */
1355 void *object; /* containing object */
1356 struct list_head list; /* link to context's list */
1357 int id; /* index into kernel idr */
1358 struct kref ref;
1359 struct rw_semaphore mutex; /* protects .live */
1360 struct rcu_head rcu; /* kfree_rcu() overhead */
1361 int live;
1362 };
1363
1364 struct ib_udata {
1365 const void __user *inbuf;
1366 void __user *outbuf;
1367 size_t inlen;
1368 size_t outlen;
1369 };
1370
1371 struct ib_pd {
1372 u32 local_dma_lkey;
1373 struct ib_device *device;
1374 struct ib_uobject *uobject;
1375 atomic_t usecnt; /* count all resources */
1376 struct ib_mr *local_mr;
1377 };
1378
1379 struct ib_xrcd {
1380 struct ib_device *device;
1381 atomic_t usecnt; /* count all exposed resources */
1382 struct inode *inode;
1383
1384 struct mutex tgt_qp_mutex;
1385 struct list_head tgt_qp_list;
1386 };
1387
1388 struct ib_ah {
1389 struct ib_device *device;
1390 struct ib_pd *pd;
1391 struct ib_uobject *uobject;
1392 };
1393
1394 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1395
1396 enum ib_poll_context {
1397 IB_POLL_DIRECT, /* caller context, no hw completions */
1398 IB_POLL_SOFTIRQ, /* poll from softirq context */
1399 IB_POLL_WORKQUEUE, /* poll from workqueue */
1400 };
1401
1402 struct ib_cq {
1403 struct ib_device *device;
1404 struct ib_uobject *uobject;
1405 ib_comp_handler comp_handler;
1406 void (*event_handler)(struct ib_event *, void *);
1407 void *cq_context;
1408 int cqe;
1409 atomic_t usecnt; /* count number of work queues */
1410 enum ib_poll_context poll_ctx;
1411 struct ib_wc *wc;
1412 union {
1413 struct irq_poll iop;
1414 struct work_struct work;
1415 };
1416 };
1417
1418 struct ib_srq {
1419 struct ib_device *device;
1420 struct ib_pd *pd;
1421 struct ib_uobject *uobject;
1422 void (*event_handler)(struct ib_event *, void *);
1423 void *srq_context;
1424 enum ib_srq_type srq_type;
1425 atomic_t usecnt;
1426
1427 union {
1428 struct {
1429 struct ib_xrcd *xrcd;
1430 struct ib_cq *cq;
1431 u32 srq_num;
1432 } xrc;
1433 } ext;
1434 };
1435
1436 enum ib_wq_type {
1437 IB_WQT_RQ
1438 };
1439
1440 enum ib_wq_state {
1441 IB_WQS_RESET,
1442 IB_WQS_RDY,
1443 IB_WQS_ERR
1444 };
1445
1446 struct ib_wq {
1447 struct ib_device *device;
1448 struct ib_uobject *uobject;
1449 void *wq_context;
1450 void (*event_handler)(struct ib_event *, void *);
1451 struct ib_pd *pd;
1452 struct ib_cq *cq;
1453 u32 wq_num;
1454 enum ib_wq_state state;
1455 enum ib_wq_type wq_type;
1456 atomic_t usecnt;
1457 };
1458
1459 struct ib_wq_init_attr {
1460 void *wq_context;
1461 enum ib_wq_type wq_type;
1462 u32 max_wr;
1463 u32 max_sge;
1464 struct ib_cq *cq;
1465 void (*event_handler)(struct ib_event *, void *);
1466 };
1467
1468 enum ib_wq_attr_mask {
1469 IB_WQ_STATE = 1 << 0,
1470 IB_WQ_CUR_STATE = 1 << 1,
1471 };
1472
1473 struct ib_wq_attr {
1474 enum ib_wq_state wq_state;
1475 enum ib_wq_state curr_wq_state;
1476 };
1477
1478 struct ib_rwq_ind_table {
1479 struct ib_device *device;
1480 struct ib_uobject *uobject;
1481 atomic_t usecnt;
1482 u32 ind_tbl_num;
1483 u32 log_ind_tbl_size;
1484 struct ib_wq **ind_tbl;
1485 };
1486
1487 struct ib_rwq_ind_table_init_attr {
1488 u32 log_ind_tbl_size;
1489 /* Each entry is a pointer to Receive Work Queue */
1490 struct ib_wq **ind_tbl;
1491 };
1492
1493 /*
1494 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1495 * @max_read_sge: Maximum SGE elements per RDMA READ request.
1496 */
1497 struct ib_qp {
1498 struct ib_device *device;
1499 struct ib_pd *pd;
1500 struct ib_cq *send_cq;
1501 struct ib_cq *recv_cq;
1502 spinlock_t mr_lock;
1503 int mrs_used;
1504 struct list_head rdma_mrs;
1505 struct list_head sig_mrs;
1506 struct ib_srq *srq;
1507 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
1508 struct list_head xrcd_list;
1509
1510 /* count times opened, mcast attaches, flow attaches */
1511 atomic_t usecnt;
1512 struct list_head open_list;
1513 struct ib_qp *real_qp;
1514 struct ib_uobject *uobject;
1515 void (*event_handler)(struct ib_event *, void *);
1516 void *qp_context;
1517 u32 qp_num;
1518 u32 max_write_sge;
1519 u32 max_read_sge;
1520 enum ib_qp_type qp_type;
1521 struct ib_rwq_ind_table *rwq_ind_tbl;
1522 };
1523
1524 struct ib_mr {
1525 struct ib_device *device;
1526 struct ib_pd *pd;
1527 u32 lkey;
1528 u32 rkey;
1529 u64 iova;
1530 u32 length;
1531 unsigned int page_size;
1532 bool need_inval;
1533 union {
1534 struct ib_uobject *uobject; /* user */
1535 struct list_head qp_entry; /* FR */
1536 };
1537 };
1538
1539 struct ib_mw {
1540 struct ib_device *device;
1541 struct ib_pd *pd;
1542 struct ib_uobject *uobject;
1543 u32 rkey;
1544 enum ib_mw_type type;
1545 };
1546
1547 struct ib_fmr {
1548 struct ib_device *device;
1549 struct ib_pd *pd;
1550 struct list_head list;
1551 u32 lkey;
1552 u32 rkey;
1553 };
1554
1555 /* Supported steering options */
1556 enum ib_flow_attr_type {
1557 /* steering according to rule specifications */
1558 IB_FLOW_ATTR_NORMAL = 0x0,
1559 /* default unicast and multicast rule -
1560 * receive all Eth traffic which isn't steered to any QP
1561 */
1562 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
1563 /* default multicast rule -
1564 * receive all Eth multicast traffic which isn't steered to any QP
1565 */
1566 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
1567 /* sniffer rule - receive all port traffic */
1568 IB_FLOW_ATTR_SNIFFER = 0x3
1569 };
1570
1571 /* Supported steering header types */
1572 enum ib_flow_spec_type {
1573 /* L2 headers*/
1574 IB_FLOW_SPEC_ETH = 0x20,
1575 IB_FLOW_SPEC_IB = 0x22,
1576 /* L3 header*/
1577 IB_FLOW_SPEC_IPV4 = 0x30,
1578 IB_FLOW_SPEC_IPV6 = 0x31,
1579 /* L4 headers*/
1580 IB_FLOW_SPEC_TCP = 0x40,
1581 IB_FLOW_SPEC_UDP = 0x41
1582 };
1583 #define IB_FLOW_SPEC_LAYER_MASK 0xF0
1584 #define IB_FLOW_SPEC_SUPPORT_LAYERS 4
1585
1586 /* Flow steering rule priority is set according to it's domain.
1587 * Lower domain value means higher priority.
1588 */
1589 enum ib_flow_domain {
1590 IB_FLOW_DOMAIN_USER,
1591 IB_FLOW_DOMAIN_ETHTOOL,
1592 IB_FLOW_DOMAIN_RFS,
1593 IB_FLOW_DOMAIN_NIC,
1594 IB_FLOW_DOMAIN_NUM /* Must be last */
1595 };
1596
1597 enum ib_flow_flags {
1598 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1599 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */
1600 };
1601
1602 struct ib_flow_eth_filter {
1603 u8 dst_mac[6];
1604 u8 src_mac[6];
1605 __be16 ether_type;
1606 __be16 vlan_tag;
1607 };
1608
1609 struct ib_flow_spec_eth {
1610 enum ib_flow_spec_type type;
1611 u16 size;
1612 struct ib_flow_eth_filter val;
1613 struct ib_flow_eth_filter mask;
1614 };
1615
1616 struct ib_flow_ib_filter {
1617 __be16 dlid;
1618 __u8 sl;
1619 };
1620
1621 struct ib_flow_spec_ib {
1622 enum ib_flow_spec_type type;
1623 u16 size;
1624 struct ib_flow_ib_filter val;
1625 struct ib_flow_ib_filter mask;
1626 };
1627
1628 struct ib_flow_ipv4_filter {
1629 __be32 src_ip;
1630 __be32 dst_ip;
1631 };
1632
1633 struct ib_flow_spec_ipv4 {
1634 enum ib_flow_spec_type type;
1635 u16 size;
1636 struct ib_flow_ipv4_filter val;
1637 struct ib_flow_ipv4_filter mask;
1638 };
1639
1640 struct ib_flow_ipv6_filter {
1641 u8 src_ip[16];
1642 u8 dst_ip[16];
1643 };
1644
1645 struct ib_flow_spec_ipv6 {
1646 enum ib_flow_spec_type type;
1647 u16 size;
1648 struct ib_flow_ipv6_filter val;
1649 struct ib_flow_ipv6_filter mask;
1650 };
1651
1652 struct ib_flow_tcp_udp_filter {
1653 __be16 dst_port;
1654 __be16 src_port;
1655 };
1656
1657 struct ib_flow_spec_tcp_udp {
1658 enum ib_flow_spec_type type;
1659 u16 size;
1660 struct ib_flow_tcp_udp_filter val;
1661 struct ib_flow_tcp_udp_filter mask;
1662 };
1663
1664 union ib_flow_spec {
1665 struct {
1666 enum ib_flow_spec_type type;
1667 u16 size;
1668 };
1669 struct ib_flow_spec_eth eth;
1670 struct ib_flow_spec_ib ib;
1671 struct ib_flow_spec_ipv4 ipv4;
1672 struct ib_flow_spec_tcp_udp tcp_udp;
1673 struct ib_flow_spec_ipv6 ipv6;
1674 };
1675
1676 struct ib_flow_attr {
1677 enum ib_flow_attr_type type;
1678 u16 size;
1679 u16 priority;
1680 u32 flags;
1681 u8 num_of_specs;
1682 u8 port;
1683 /* Following are the optional layers according to user request
1684 * struct ib_flow_spec_xxx
1685 * struct ib_flow_spec_yyy
1686 */
1687 };
1688
1689 struct ib_flow {
1690 struct ib_qp *qp;
1691 struct ib_uobject *uobject;
1692 };
1693
1694 struct ib_mad_hdr;
1695 struct ib_grh;
1696
1697 enum ib_process_mad_flags {
1698 IB_MAD_IGNORE_MKEY = 1,
1699 IB_MAD_IGNORE_BKEY = 2,
1700 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
1701 };
1702
1703 enum ib_mad_result {
1704 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */
1705 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
1706 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */
1707 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
1708 };
1709
1710 #define IB_DEVICE_NAME_MAX 64
1711
1712 struct ib_cache {
1713 rwlock_t lock;
1714 struct ib_event_handler event_handler;
1715 struct ib_pkey_cache **pkey_cache;
1716 struct ib_gid_table **gid_cache;
1717 u8 *lmc_cache;
1718 };
1719
1720 struct ib_dma_mapping_ops {
1721 int (*mapping_error)(struct ib_device *dev,
1722 u64 dma_addr);
1723 u64 (*map_single)(struct ib_device *dev,
1724 void *ptr, size_t size,
1725 enum dma_data_direction direction);
1726 void (*unmap_single)(struct ib_device *dev,
1727 u64 addr, size_t size,
1728 enum dma_data_direction direction);
1729 u64 (*map_page)(struct ib_device *dev,
1730 struct page *page, unsigned long offset,
1731 size_t size,
1732 enum dma_data_direction direction);
1733 void (*unmap_page)(struct ib_device *dev,
1734 u64 addr, size_t size,
1735 enum dma_data_direction direction);
1736 int (*map_sg)(struct ib_device *dev,
1737 struct scatterlist *sg, int nents,
1738 enum dma_data_direction direction);
1739 void (*unmap_sg)(struct ib_device *dev,
1740 struct scatterlist *sg, int nents,
1741 enum dma_data_direction direction);
1742 void (*sync_single_for_cpu)(struct ib_device *dev,
1743 u64 dma_handle,
1744 size_t size,
1745 enum dma_data_direction dir);
1746 void (*sync_single_for_device)(struct ib_device *dev,
1747 u64 dma_handle,
1748 size_t size,
1749 enum dma_data_direction dir);
1750 void *(*alloc_coherent)(struct ib_device *dev,
1751 size_t size,
1752 u64 *dma_handle,
1753 gfp_t flag);
1754 void (*free_coherent)(struct ib_device *dev,
1755 size_t size, void *cpu_addr,
1756 u64 dma_handle);
1757 };
1758
1759 struct iw_cm_verbs;
1760
1761 struct ib_port_immutable {
1762 int pkey_tbl_len;
1763 int gid_tbl_len;
1764 u32 core_cap_flags;
1765 u32 max_mad_size;
1766 };
1767
1768 struct ib_device {
1769 struct device *dma_device;
1770
1771 char name[IB_DEVICE_NAME_MAX];
1772
1773 struct list_head event_handler_list;
1774 spinlock_t event_handler_lock;
1775
1776 spinlock_t client_data_lock;
1777 struct list_head core_list;
1778 /* Access to the client_data_list is protected by the client_data_lock
1779 * spinlock and the lists_rwsem read-write semaphore */
1780 struct list_head client_data_list;
1781
1782 struct ib_cache cache;
1783 /**
1784 * port_immutable is indexed by port number
1785 */
1786 struct ib_port_immutable *port_immutable;
1787
1788 int num_comp_vectors;
1789
1790 struct iw_cm_verbs *iwcm;
1791
1792 /**
1793 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
1794 * driver initialized data. The struct is kfree()'ed by the sysfs
1795 * core when the device is removed. A lifespan of -1 in the return
1796 * struct tells the core to set a default lifespan.
1797 */
1798 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
1799 u8 port_num);
1800 /**
1801 * get_hw_stats - Fill in the counter value(s) in the stats struct.
1802 * @index - The index in the value array we wish to have updated, or
1803 * num_counters if we want all stats updated
1804 * Return codes -
1805 * < 0 - Error, no counters updated
1806 * index - Updated the single counter pointed to by index
1807 * num_counters - Updated all counters (will reset the timestamp
1808 * and prevent further calls for lifespan milliseconds)
1809 * Drivers are allowed to update all counters in leiu of just the
1810 * one given in index at their option
1811 */
1812 int (*get_hw_stats)(struct ib_device *device,
1813 struct rdma_hw_stats *stats,
1814 u8 port, int index);
1815 int (*query_device)(struct ib_device *device,
1816 struct ib_device_attr *device_attr,
1817 struct ib_udata *udata);
1818 int (*query_port)(struct ib_device *device,
1819 u8 port_num,
1820 struct ib_port_attr *port_attr);
1821 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
1822 u8 port_num);
1823 /* When calling get_netdev, the HW vendor's driver should return the
1824 * net device of device @device at port @port_num or NULL if such
1825 * a net device doesn't exist. The vendor driver should call dev_hold
1826 * on this net device. The HW vendor's device driver must guarantee
1827 * that this function returns NULL before the net device reaches
1828 * NETDEV_UNREGISTER_FINAL state.
1829 */
1830 struct net_device *(*get_netdev)(struct ib_device *device,
1831 u8 port_num);
1832 int (*query_gid)(struct ib_device *device,
1833 u8 port_num, int index,
1834 union ib_gid *gid);
1835 /* When calling add_gid, the HW vendor's driver should
1836 * add the gid of device @device at gid index @index of
1837 * port @port_num to be @gid. Meta-info of that gid (for example,
1838 * the network device related to this gid is available
1839 * at @attr. @context allows the HW vendor driver to store extra
1840 * information together with a GID entry. The HW vendor may allocate
1841 * memory to contain this information and store it in @context when a
1842 * new GID entry is written to. Params are consistent until the next
1843 * call of add_gid or delete_gid. The function should return 0 on
1844 * success or error otherwise. The function could be called
1845 * concurrently for different ports. This function is only called
1846 * when roce_gid_table is used.
1847 */
1848 int (*add_gid)(struct ib_device *device,
1849 u8 port_num,
1850 unsigned int index,
1851 const union ib_gid *gid,
1852 const struct ib_gid_attr *attr,
1853 void **context);
1854 /* When calling del_gid, the HW vendor's driver should delete the
1855 * gid of device @device at gid index @index of port @port_num.
1856 * Upon the deletion of a GID entry, the HW vendor must free any
1857 * allocated memory. The caller will clear @context afterwards.
1858 * This function is only called when roce_gid_table is used.
1859 */
1860 int (*del_gid)(struct ib_device *device,
1861 u8 port_num,
1862 unsigned int index,
1863 void **context);
1864 int (*query_pkey)(struct ib_device *device,
1865 u8 port_num, u16 index, u16 *pkey);
1866 int (*modify_device)(struct ib_device *device,
1867 int device_modify_mask,
1868 struct ib_device_modify *device_modify);
1869 int (*modify_port)(struct ib_device *device,
1870 u8 port_num, int port_modify_mask,
1871 struct ib_port_modify *port_modify);
1872 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
1873 struct ib_udata *udata);
1874 int (*dealloc_ucontext)(struct ib_ucontext *context);
1875 int (*mmap)(struct ib_ucontext *context,
1876 struct vm_area_struct *vma);
1877 struct ib_pd * (*alloc_pd)(struct ib_device *device,
1878 struct ib_ucontext *context,
1879 struct ib_udata *udata);
1880 int (*dealloc_pd)(struct ib_pd *pd);
1881 struct ib_ah * (*create_ah)(struct ib_pd *pd,
1882 struct ib_ah_attr *ah_attr);
1883 int (*modify_ah)(struct ib_ah *ah,
1884 struct ib_ah_attr *ah_attr);
1885 int (*query_ah)(struct ib_ah *ah,
1886 struct ib_ah_attr *ah_attr);
1887 int (*destroy_ah)(struct ib_ah *ah);
1888 struct ib_srq * (*create_srq)(struct ib_pd *pd,
1889 struct ib_srq_init_attr *srq_init_attr,
1890 struct ib_udata *udata);
1891 int (*modify_srq)(struct ib_srq *srq,
1892 struct ib_srq_attr *srq_attr,
1893 enum ib_srq_attr_mask srq_attr_mask,
1894 struct ib_udata *udata);
1895 int (*query_srq)(struct ib_srq *srq,
1896 struct ib_srq_attr *srq_attr);
1897 int (*destroy_srq)(struct ib_srq *srq);
1898 int (*post_srq_recv)(struct ib_srq *srq,
1899 struct ib_recv_wr *recv_wr,
1900 struct ib_recv_wr **bad_recv_wr);
1901 struct ib_qp * (*create_qp)(struct ib_pd *pd,
1902 struct ib_qp_init_attr *qp_init_attr,
1903 struct ib_udata *udata);
1904 int (*modify_qp)(struct ib_qp *qp,
1905 struct ib_qp_attr *qp_attr,
1906 int qp_attr_mask,
1907 struct ib_udata *udata);
1908 int (*query_qp)(struct ib_qp *qp,
1909 struct ib_qp_attr *qp_attr,
1910 int qp_attr_mask,
1911 struct ib_qp_init_attr *qp_init_attr);
1912 int (*destroy_qp)(struct ib_qp *qp);
1913 int (*post_send)(struct ib_qp *qp,
1914 struct ib_send_wr *send_wr,
1915 struct ib_send_wr **bad_send_wr);
1916 int (*post_recv)(struct ib_qp *qp,
1917 struct ib_recv_wr *recv_wr,
1918 struct ib_recv_wr **bad_recv_wr);
1919 struct ib_cq * (*create_cq)(struct ib_device *device,
1920 const struct ib_cq_init_attr *attr,
1921 struct ib_ucontext *context,
1922 struct ib_udata *udata);
1923 int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1924 u16 cq_period);
1925 int (*destroy_cq)(struct ib_cq *cq);
1926 int (*resize_cq)(struct ib_cq *cq, int cqe,
1927 struct ib_udata *udata);
1928 int (*poll_cq)(struct ib_cq *cq, int num_entries,
1929 struct ib_wc *wc);
1930 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1931 int (*req_notify_cq)(struct ib_cq *cq,
1932 enum ib_cq_notify_flags flags);
1933 int (*req_ncomp_notif)(struct ib_cq *cq,
1934 int wc_cnt);
1935 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
1936 int mr_access_flags);
1937 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
1938 u64 start, u64 length,
1939 u64 virt_addr,
1940 int mr_access_flags,
1941 struct ib_udata *udata);
1942 int (*rereg_user_mr)(struct ib_mr *mr,
1943 int flags,
1944 u64 start, u64 length,
1945 u64 virt_addr,
1946 int mr_access_flags,
1947 struct ib_pd *pd,
1948 struct ib_udata *udata);
1949 int (*dereg_mr)(struct ib_mr *mr);
1950 struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
1951 enum ib_mr_type mr_type,
1952 u32 max_num_sg);
1953 int (*map_mr_sg)(struct ib_mr *mr,
1954 struct scatterlist *sg,
1955 int sg_nents,
1956 unsigned int *sg_offset);
1957 struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
1958 enum ib_mw_type type,
1959 struct ib_udata *udata);
1960 int (*dealloc_mw)(struct ib_mw *mw);
1961 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
1962 int mr_access_flags,
1963 struct ib_fmr_attr *fmr_attr);
1964 int (*map_phys_fmr)(struct ib_fmr *fmr,
1965 u64 *page_list, int list_len,
1966 u64 iova);
1967 int (*unmap_fmr)(struct list_head *fmr_list);
1968 int (*dealloc_fmr)(struct ib_fmr *fmr);
1969 int (*attach_mcast)(struct ib_qp *qp,
1970 union ib_gid *gid,
1971 u16 lid);
1972 int (*detach_mcast)(struct ib_qp *qp,
1973 union ib_gid *gid,
1974 u16 lid);
1975 int (*process_mad)(struct ib_device *device,
1976 int process_mad_flags,
1977 u8 port_num,
1978 const struct ib_wc *in_wc,
1979 const struct ib_grh *in_grh,
1980 const struct ib_mad_hdr *in_mad,
1981 size_t in_mad_size,
1982 struct ib_mad_hdr *out_mad,
1983 size_t *out_mad_size,
1984 u16 *out_mad_pkey_index);
1985 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
1986 struct ib_ucontext *ucontext,
1987 struct ib_udata *udata);
1988 int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
1989 struct ib_flow * (*create_flow)(struct ib_qp *qp,
1990 struct ib_flow_attr
1991 *flow_attr,
1992 int domain);
1993 int (*destroy_flow)(struct ib_flow *flow_id);
1994 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
1995 struct ib_mr_status *mr_status);
1996 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
1997 void (*drain_rq)(struct ib_qp *qp);
1998 void (*drain_sq)(struct ib_qp *qp);
1999 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2000 int state);
2001 int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2002 struct ifla_vf_info *ivf);
2003 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2004 struct ifla_vf_stats *stats);
2005 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2006 int type);
2007 struct ib_wq * (*create_wq)(struct ib_pd *pd,
2008 struct ib_wq_init_attr *init_attr,
2009 struct ib_udata *udata);
2010 int (*destroy_wq)(struct ib_wq *wq);
2011 int (*modify_wq)(struct ib_wq *wq,
2012 struct ib_wq_attr *attr,
2013 u32 wq_attr_mask,
2014 struct ib_udata *udata);
2015 struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device,
2016 struct ib_rwq_ind_table_init_attr *init_attr,
2017 struct ib_udata *udata);
2018 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2019 struct ib_dma_mapping_ops *dma_ops;
2020
2021 struct module *owner;
2022 struct device dev;
2023 struct kobject *ports_parent;
2024 struct list_head port_list;
2025
2026 enum {
2027 IB_DEV_UNINITIALIZED,
2028 IB_DEV_REGISTERED,
2029 IB_DEV_UNREGISTERED
2030 } reg_state;
2031
2032 int uverbs_abi_ver;
2033 u64 uverbs_cmd_mask;
2034 u64 uverbs_ex_cmd_mask;
2035
2036 char node_desc[64];
2037 __be64 node_guid;
2038 u32 local_dma_lkey;
2039 u16 is_switch:1;
2040 u8 node_type;
2041 u8 phys_port_cnt;
2042 struct ib_device_attr attrs;
2043 struct attribute_group *hw_stats_ag;
2044 struct rdma_hw_stats *hw_stats;
2045
2046 /**
2047 * The following mandatory functions are used only at device
2048 * registration. Keep functions such as these at the end of this
2049 * structure to avoid cache line misses when accessing struct ib_device
2050 * in fast paths.
2051 */
2052 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
2053 void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
2054 };
2055
/*
 * struct ib_client - hooks a kernel module registers (via
 * ib_register_client()) so the IB core can notify it as RDMA devices
 * are added to and removed from the system.
 */
struct ib_client {
	/* Human-readable client name. */
	char *name;
	/* Called for each RDMA device available to this client. */
	void (*add) (struct ib_device *);
	/* Called on device removal; @client_data is whatever the client
	 * stored via ib_set_client_data(). */
	void (*remove)(struct ib_device *, void *client_data);

	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev: An RDMA device that the net_dev use for communication.
	 * @port: A physical port number on the RDMA device.
	 * @pkey: P_Key that the net_dev uses if applicable.
	 * @gid: A GID that the net_dev uses to communicate.
	 * @addr: An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * netdev. */
	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);
	/* Linkage on the core's client list — internal to the IB core. */
	struct list_head list;
};
2085
/* Allocate an ib_device of @size bytes (driver-private data included);
 * release with ib_dealloc_device(). */
struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

/* Copy the device firmware version string into @str (at most @str_len
 * bytes) — backed by the device's get_dev_fw_str op. */
void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len);

/* Register/unregister a device with the IB core.  @port_callback, if
 * non-NULL, is invoked per port — presumably for sysfs setup; confirm
 * against the ib_core implementation. */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

/* Register/unregister an upper-layer client (see struct ib_client). */
int ib_register_client  (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

/* Per-(device, client) private data storage. */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);
2102
2103 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2104 {
2105 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2106 }
2107
2108 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2109 {
2110 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2111 }
2112
2113 static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2114 size_t offset,
2115 size_t len)
2116 {
2117 const void __user *p = udata->inbuf + offset;
2118 bool ret = false;
2119 u8 *buf;
2120
2121 if (len > USHRT_MAX)
2122 return false;
2123
2124 buf = kmalloc(len, GFP_KERNEL);
2125 if (!buf)
2126 return false;
2127
2128 if (copy_from_user(buf, p, len))
2129 goto free;
2130
2131 ret = !memchr_inv(buf, 0, len);
2132
2133 free:
2134 kfree(buf);
2135 return ret;
2136 }
2137
2138 /**
2139 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2140 * contains all required attributes and no attributes not allowed for
2141 * the given QP state transition.
2142 * @cur_state: Current QP state
2143 * @next_state: Next QP state
2144 * @type: QP type
2145 * @mask: Mask of supplied QP attributes
2146 * @ll : link layer of port
2147 *
2148 * This function is a helper function that a low-level driver's
2149 * modify_qp method can use to validate the consumer's input. It
2150 * checks that cur_state and next_state are valid QP states, that a
2151 * transition from cur_state to next_state is allowed by the IB spec,
2152 * and that the attribute mask supplied is allowed for the transition.
2153 */
2154 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2155 enum ib_qp_type type, enum ib_qp_attr_mask mask,
2156 enum rdma_link_layer ll);
2157
/* Register/unregister a handler for asynchronous device events, and
 * dispatch an event to all registered handlers. */
int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

/* Fetch the attributes of port @port_num into @port_attr. */
int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

/* Report the link layer (IB or Ethernet) of the given port. */
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);
2167
2168 /**
2169 * rdma_cap_ib_switch - Check if the device is IB switch
2170 * @device: Device to check
2171 *
2172 * Device driver is responsible for setting is_switch bit on
2173 * in ib_device structure at init time.
2174 *
2175 * Return: true if the device is IB switch.
2176 */
2177 static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2178 {
2179 return device->is_switch;
2180 }
2181
2182 /**
2183 * rdma_start_port - Return the first valid port number for the device
2184 * specified
2185 *
2186 * @device: Device to be checked
2187 *
2188 * Return start port number
2189 */
2190 static inline u8 rdma_start_port(const struct ib_device *device)
2191 {
2192 return rdma_cap_ib_switch(device) ? 0 : 1;
2193 }
2194
2195 /**
2196 * rdma_end_port - Return the last valid port number for the device
2197 * specified
2198 *
2199 * @device: Device to be checked
2200 *
2201 * Return last port number
2202 */
2203 static inline u8 rdma_end_port(const struct ib_device *device)
2204 {
2205 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2206 }
2207
2208 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2209 {
2210 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2211 }
2212
2213 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2214 {
2215 return device->port_immutable[port_num].core_cap_flags &
2216 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2217 }
2218
2219 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2220 {
2221 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2222 }
2223
2224 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2225 {
2226 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2227 }
2228
2229 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2230 {
2231 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2232 }
2233
2234 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2235 {
2236 return rdma_protocol_ib(device, port_num) ||
2237 rdma_protocol_roce(device, port_num);
2238 }
2239
2240 /**
2241 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
2242 * Management Datagrams.
2243 * @device: Device to check
2244 * @port_num: Port number to check
2245 *
2246 * Management Datagrams (MAD) are a required part of the InfiniBand
2247 * specification and are supported on all InfiniBand devices. A slightly
2248 * extended version are also supported on OPA interfaces.
2249 *
2250 * Return: true if the port supports sending/receiving of MAD packets.
2251 */
2252 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2253 {
2254 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2255 }
2256
2257 /**
2258 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
2259 * Management Datagrams.
2260 * @device: Device to check
2261 * @port_num: Port number to check
2262 *
2263 * Intel OmniPath devices extend and/or replace the InfiniBand Management
2264 * datagrams with their own versions. These OPA MADs share many but not all of
2265 * the characteristics of InfiniBand MADs.
2266 *
2267 * OPA MADs differ in the following ways:
2268 *
2269 * 1) MADs are variable size up to 2K
2270 * IBTA defined MADs remain fixed at 256 bytes
2271 * 2) OPA SMPs must carry valid PKeys
2272 * 3) OPA SMP packets are a different format
2273 *
2274 * Return: true if the port supports OPA MAD packet formats.
2275 */
2276 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2277 {
2278 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2279 == RDMA_CORE_CAP_OPA_MAD;
2280 }
2281
2282 /**
2283 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
2284 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
2285 * @device: Device to check
2286 * @port_num: Port number to check
2287 *
2288 * Each InfiniBand node is required to provide a Subnet Management Agent
2289 * that the subnet manager can access. Prior to the fabric being fully
2290 * configured by the subnet manager, the SMA is accessed via a well known
2291 * interface called the Subnet Management Interface (SMI). This interface
2292 * uses directed route packets to communicate with the SM to get around the
2293 * chicken and egg problem of the SM needing to know what's on the fabric
2294 * in order to configure the fabric, and needing to configure the fabric in
2295 * order to send packets to the devices on the fabric. These directed
2296 * route packets do not need the fabric fully configured in order to reach
2297 * their destination. The SMI is the only method allowed to send
2298 * directed route packets on an InfiniBand fabric.
2299 *
2300 * Return: true if the port provides an SMI.
2301 */
2302 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2303 {
2304 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2305 }
2306
2307 /**
2308 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
2309 * Communication Manager.
2310 * @device: Device to check
2311 * @port_num: Port number to check
2312 *
2313 * The InfiniBand Communication Manager is one of many pre-defined General
2314 * Service Agents (GSA) that are accessed via the General Service
2315 * Interface (GSI). It's role is to facilitate establishment of connections
2316 * between nodes as well as other management related tasks for established
2317 * connections.
2318 *
2319 * Return: true if the port supports an IB CM (this does not guarantee that
2320 * a CM is actually running however).
2321 */
2322 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2323 {
2324 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2325 }
2326
2327 /**
2328 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
2329 * Communication Manager.
2330 * @device: Device to check
2331 * @port_num: Port number to check
2332 *
2333 * Similar to above, but specific to iWARP connections which have a different
2334 * managment protocol than InfiniBand.
2335 *
2336 * Return: true if the port supports an iWARP CM (this does not guarantee that
2337 * a CM is actually running however).
2338 */
2339 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2340 {
2341 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2342 }
2343
2344 /**
2345 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
2346 * Subnet Administration.
2347 * @device: Device to check
2348 * @port_num: Port number to check
2349 *
2350 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2351 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
2352 * fabrics, devices should resolve routes to other hosts by contacting the
2353 * SA to query the proper route.
2354 *
2355 * Return: true if the port should act as a client to the fabric Subnet
2356 * Administration interface. This does not imply that the SA service is
2357 * running locally.
2358 */
2359 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2360 {
2361 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2362 }
2363
2364 /**
2365 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
2366 * Multicast.
2367 * @device: Device to check
2368 * @port_num: Port number to check
2369 *
2370 * InfiniBand multicast registration is more complex than normal IPv4 or
2371 * IPv6 multicast registration. Each Host Channel Adapter must register
2372 * with the Subnet Manager when it wishes to join a multicast group. It
2373 * should do so only once regardless of how many queue pairs it subscribes
2374 * to this group. And it should leave the group only after all queue pairs
2375 * attached to the group have been detached.
2376 *
2377 * Return: true if the port must undertake the additional adminstrative
2378 * overhead of registering/unregistering with the SM and tracking of the
2379 * total number of queue pairs attached to the multicast group.
2380 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	/* Multicast membership is administered through the SA, so the
	 * multicast capability tracks the SA capability exactly. */
	return rdma_cap_ib_sa(device, port_num);
}
2385
2386 /**
2387 * rdma_cap_af_ib - Check if the port of device has the capability
2388 * Native Infiniband Address.
2389 * @device: Device to check
2390 * @port_num: Port number to check
2391 *
2392 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2393 * GID. RoCE uses a different mechanism, but still generates a GID via
2394 * a prescribed mechanism and port specific data.
2395 *
2396 * Return: true if the port uses a GID address to identify devices on the
2397 * network.
2398 */
2399 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2400 {
2401 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2402 }
2403
2404 /**
2405 * rdma_cap_eth_ah - Check if the port of device has the capability
2406 * Ethernet Address Handle.
2407 * @device: Device to check
2408 * @port_num: Port number to check
2409 *
2410 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
2411 * to fabricate GIDs over Ethernet/IP specific addresses native to the
2412 * port. Normally, packet headers are generated by the sending host
2413 * adapter, but when sending connectionless datagrams, we must manually
2414 * inject the proper headers for the fabric we are communicating over.
2415 *
2416 * Return: true if we are running as a RoCE port and must force the
2417 * addition of a Global Route Header built from our Ethernet Address
2418 * Handle into our header list for connectionless packets.
2419 */
2420 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2421 {
2422 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2423 }
2424
2425 /**
2426 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
2427 *
2428 * @device: Device
2429 * @port_num: Port number
2430 *
2431 * This MAD size includes the MAD headers and MAD payload. No other headers
2432 * are included.
2433 *
2434 * Return the max MAD size required by the Port. Will return 0 if the port
2435 * does not support MADs
2436 */
2437 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
2438 {
2439 return device->port_immutable[port_num].max_mad_size;
2440 }
2441
2442 /**
2443 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
2444 * @device: Device to check
2445 * @port_num: Port number to check
2446 *
2447 * RoCE GID table mechanism manages the various GIDs for a device.
2448 *
2449 * NOTE: if allocating the port's GID table has failed, this call will still
2450 * return true, but any RoCE GID table API will fail.
2451 *
2452 * Return: true if the port uses RoCE GID table mechanism in order to manage
2453 * its GIDs.
2454 */
2455 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
2456 u8 port_num)
2457 {
2458 return rdma_protocol_roce(device, port_num) &&
2459 device->add_gid && device->del_gid;
2460 }
2461
2462 /*
2463 * Check if the device supports READ W/ INVALIDATE.
2464 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE. No other protocol
	 * has support for it yet, so the capability simply mirrors the
	 * iWARP protocol check.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}
2473
/* Read one entry of the port's GID table into @gid (and @attr). */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr);

/* SR-IOV virtual function management — thin wrappers over the matching
 * set_vf_link_state/get_vf_config/get_vf_stats/set_vf_guid device ops. */
int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info);
int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats);
int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type);

/* Read one entry of the port's P_Key table into @pkey. */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

/* Modify device-wide / per-port attributes selected by the mask. */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

/* Reverse lookups: locate the (port, index) holding a given GID/P_Key. */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		enum ib_gid_type gid_type, struct net_device *ndev,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

/* Allocate / free a protection domain on @device. */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

void ib_dealloc_pd(struct ib_pd *pd);
2508
2509 /**
2510 * ib_create_ah - Creates an address handle for the given address vector.
2511 * @pd: The protection domain associated with the address handle.
2512 * @ah_attr: The attributes of the address vector.
2513 *
2514 * The address handle is used to reference a local or global destination
2515 * in all UD QP post sends.
2516 */
2517 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
2518
2519 /**
2520 * ib_init_ah_from_wc - Initializes address handle attributes from a
2521 * work completion.
2522 * @device: Device on which the received message arrived.
2523 * @port_num: Port on which the received message arrived.
2524 * @wc: Work completion associated with the received message.
2525 * @grh: References the received global route header. This parameter is
2526 * ignored unless the work completion indicates that the GRH is valid.
2527 * @ah_attr: Returned attributes that can be used when creating an address
2528 * handle for replying to the message.
2529 */
2530 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
2531 const struct ib_wc *wc, const struct ib_grh *grh,
2532 struct ib_ah_attr *ah_attr);
2533
2534 /**
2535 * ib_create_ah_from_wc - Creates an address handle associated with the
2536 * sender of the specified work completion.
2537 * @pd: The protection domain associated with the address handle.
2538 * @wc: Work completion information associated with a received message.
2539 * @grh: References the received global route header. This parameter is
2540 * ignored unless the work completion indicates that the GRH is valid.
2541 * @port_num: The outbound port number to associate with the address.
2542 *
2543 * The address handle is used to reference a local or global destination
2544 * in all UD QP post sends.
2545 */
2546 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2547 const struct ib_grh *grh, u8 port_num);
2548
2549 /**
2550 * ib_modify_ah - Modifies the address vector associated with an address
2551 * handle.
2552 * @ah: The address handle to modify.
2553 * @ah_attr: The new address vector attributes to associate with the
2554 * address handle.
2555 */
2556 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2557
2558 /**
2559 * ib_query_ah - Queries the address vector associated with an address
2560 * handle.
2561 * @ah: The address handle to query.
2562 * @ah_attr: The address vector attributes associated with the address
2563 * handle.
2564 */
2565 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2566
2567 /**
2568 * ib_destroy_ah - Destroys an address handle.
2569 * @ah: The address handle to destroy.
2570 */
2571 int ib_destroy_ah(struct ib_ah *ah);
2572
2573 /**
2574 * ib_create_srq - Creates a SRQ associated with the specified protection
2575 * domain.
2576 * @pd: The protection domain associated with the SRQ.
2577 * @srq_init_attr: A list of initial attributes required to create the
2578 * SRQ. If SRQ creation succeeds, then the attributes are updated to
2579 * the actual capabilities of the created SRQ.
2580 *
2581 * srq_attr->max_wr and srq_attr->max_sge are read the determine the
2582 * requested size of the SRQ, and set to the actual values allocated
2583 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
2584 * will always be at least as large as the requested values.
2585 */
2586 struct ib_srq *ib_create_srq(struct ib_pd *pd,
2587 struct ib_srq_init_attr *srq_init_attr);
2588
2589 /**
2590 * ib_modify_srq - Modifies the attributes for the specified SRQ.
2591 * @srq: The SRQ to modify.
2592 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
2593 * the current values of selected SRQ attributes are returned.
2594 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
2595 * are being modified.
2596 *
2597 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
2598 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
2599 * the number of receives queued drops below the limit.
2600 */
2601 int ib_modify_srq(struct ib_srq *srq,
2602 struct ib_srq_attr *srq_attr,
2603 enum ib_srq_attr_mask srq_attr_mask);
2604
2605 /**
2606 * ib_query_srq - Returns the attribute list and current values for the
2607 * specified SRQ.
2608 * @srq: The SRQ to query.
2609 * @srq_attr: The attributes of the specified SRQ.
2610 */
2611 int ib_query_srq(struct ib_srq *srq,
2612 struct ib_srq_attr *srq_attr);
2613
2614 /**
2615 * ib_destroy_srq - Destroys the specified SRQ.
2616 * @srq: The SRQ to destroy.
2617 */
2618 int ib_destroy_srq(struct ib_srq *srq);
2619
2620 /**
2621 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
2622 * @srq: The SRQ to post the work request on.
2623 * @recv_wr: A list of work requests to post on the receive queue.
2624 * @bad_recv_wr: On an immediate failure, this parameter will reference
2625 * the work request that failed to be posted on the QP.
2626 */
2627 static inline int ib_post_srq_recv(struct ib_srq *srq,
2628 struct ib_recv_wr *recv_wr,
2629 struct ib_recv_wr **bad_recv_wr)
2630 {
2631 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
2632 }
2633
2634 /**
2635 * ib_create_qp - Creates a QP associated with the specified protection
2636 * domain.
2637 * @pd: The protection domain associated with the QP.
2638 * @qp_init_attr: A list of initial attributes required to create the
2639 * QP. If QP creation succeeds, then the attributes are updated to
2640 * the actual capabilities of the created QP.
2641 */
2642 struct ib_qp *ib_create_qp(struct ib_pd *pd,
2643 struct ib_qp_init_attr *qp_init_attr);
2644
2645 /**
2646 * ib_modify_qp - Modifies the attributes for the specified QP and then
2647 * transitions the QP to the given state.
2648 * @qp: The QP to modify.
2649 * @qp_attr: On input, specifies the QP attributes to modify. On output,
2650 * the current values of selected QP attributes are returned.
2651 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
2652 * are being modified.
2653 */
2654 int ib_modify_qp(struct ib_qp *qp,
2655 struct ib_qp_attr *qp_attr,
2656 int qp_attr_mask);
2657
2658 /**
2659 * ib_query_qp - Returns the attribute list and current values for the
2660 * specified QP.
2661 * @qp: The QP to query.
2662 * @qp_attr: The attributes of the specified QP.
2663 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
2664 * @qp_init_attr: Additional attributes of the selected QP.
2665 *
2666 * The qp_attr_mask may be used to limit the query to gathering only the
2667 * selected attributes.
2668 */
2669 int ib_query_qp(struct ib_qp *qp,
2670 struct ib_qp_attr *qp_attr,
2671 int qp_attr_mask,
2672 struct ib_qp_init_attr *qp_init_attr);
2673
2674 /**
2675 * ib_destroy_qp - Destroys the specified QP.
2676 * @qp: The QP to destroy.
2677 */
2678 int ib_destroy_qp(struct ib_qp *qp);
2679
2680 /**
2681 * ib_open_qp - Obtain a reference to an existing sharable QP.
2682 * @xrcd - XRC domain
2683 * @qp_open_attr: Attributes identifying the QP to open.
2684 *
2685 * Returns a reference to a sharable QP.
2686 */
2687 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
2688 struct ib_qp_open_attr *qp_open_attr);
2689
2690 /**
2691 * ib_close_qp - Release an external reference to a QP.
2692 * @qp: The QP handle to release
2693 *
2694 * The opened QP handle is released by the caller. The underlying
2695 * shared QP is not destroyed until all internal references are released.
2696 */
2697 int ib_close_qp(struct ib_qp *qp);
2698
2699 /**
2700 * ib_post_send - Posts a list of work requests to the send queue of
2701 * the specified QP.
2702 * @qp: The QP to post the work request on.
2703 * @send_wr: A list of work requests to post on the send queue.
2704 * @bad_send_wr: On an immediate failure, this parameter will reference
2705 * the work request that failed to be posted on the QP.
2706 *
2707 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
2708 * error is returned, the QP state shall not be affected,
2709 * ib_post_send() will return an immediate error after queueing any
2710 * earlier work requests in the list.
2711 */
2712 static inline int ib_post_send(struct ib_qp *qp,
2713 struct ib_send_wr *send_wr,
2714 struct ib_send_wr **bad_send_wr)
2715 {
2716 return qp->device->post_send(qp, send_wr, bad_send_wr);
2717 }
2718
2719 /**
2720 * ib_post_recv - Posts a list of work requests to the receive queue of
2721 * the specified QP.
2722 * @qp: The QP to post the work request on.
2723 * @recv_wr: A list of work requests to post on the receive queue.
2724 * @bad_recv_wr: On an immediate failure, this parameter will reference
2725 * the work request that failed to be posted on the QP.
2726 */
2727 static inline int ib_post_recv(struct ib_qp *qp,
2728 struct ib_recv_wr *recv_wr,
2729 struct ib_recv_wr **bad_recv_wr)
2730 {
2731 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
2732 }
2733
/* Allocate a CQ with core-managed completion handling (@poll_ctx selects
 * direct/softirq/workqueue polling); pair with ib_free_cq(). */
struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
			  int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
void ib_free_cq(struct ib_cq *cq);
/* Poll a direct-mode CQ, processing at most @budget completions. */
int ib_process_cq_direct(struct ib_cq *cq, int budget);
2738
2739 /**
2740 * ib_create_cq - Creates a CQ on the specified device.
2741 * @device: The device on which to create the CQ.
2742 * @comp_handler: A user-specified callback that is invoked when a
2743 * completion event occurs on the CQ.
2744 * @event_handler: A user-specified callback that is invoked when an
2745 * asynchronous event not associated with a completion occurs on the CQ.
2746 * @cq_context: Context associated with the CQ returned to the user via
2747 * the associated completion and event handlers.
2748 * @cq_attr: The attributes the CQ should be created upon.
2749 *
2750 * Users can examine the cq structure to determine the actual CQ size.
2751 */
2752 struct ib_cq *ib_create_cq(struct ib_device *device,
2753 ib_comp_handler comp_handler,
2754 void (*event_handler)(struct ib_event *, void *),
2755 void *cq_context,
2756 const struct ib_cq_init_attr *cq_attr);
2757
2758 /**
2759 * ib_resize_cq - Modifies the capacity of the CQ.
2760 * @cq: The CQ to resize.
2761 * @cqe: The minimum size of the CQ.
2762 *
2763 * Users can examine the cq structure to determine the actual CQ size.
2764 */
2765 int ib_resize_cq(struct ib_cq *cq, int cqe);
2766
2767 /**
2768 * ib_modify_cq - Modifies moderation params of the CQ
2769 * @cq: The CQ to modify.
2770 * @cq_count: number of CQEs that will trigger an event
2771 * @cq_period: max period of time in usec before triggering an event
2772 *
2773 */
2774 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2775
2776 /**
2777 * ib_destroy_cq - Destroys the specified CQ.
2778 * @cq: The CQ to destroy.
2779 */
2780 int ib_destroy_cq(struct ib_cq *cq);
2781
2782 /**
2783 * ib_poll_cq - poll a CQ for completion(s)
2784 * @cq:the CQ being polled
2785 * @num_entries:maximum number of completions to return
2786 * @wc:array of at least @num_entries &struct ib_wc where completions
2787 * will be returned
2788 *
2789 * Poll a CQ for (possibly multiple) completions. If the return value
2790 * is < 0, an error occurred. If the return value is >= 0, it is the
2791 * number of completions returned. If the return value is
2792 * non-negative and < num_entries, then the CQ was emptied.
2793 */
2794 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
2795 struct ib_wc *wc)
2796 {
2797 return cq->device->poll_cq(cq, num_entries, wc);
2798 }
2799
2800 /**
2801 * ib_peek_cq - Returns the number of unreaped completions currently
2802 * on the specified CQ.
2803 * @cq: The CQ to peek.
2804 * @wc_cnt: A minimum number of unreaped completions to check for.
2805 *
2806 * If the number of unreaped completions is greater than or equal to wc_cnt,
2807 * this function returns wc_cnt, otherwise, it returns the actual number of
2808 * unreaped completions.
2809 */
2810 int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
2811
2812 /**
2813 * ib_req_notify_cq - Request completion notification on a CQ.
2814 * @cq: The CQ to generate an event for.
2815 * @flags:
2816 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
2817 * to request an event on the next solicited event or next work
2818 * completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
2819 * may also be |ed in to request a hint about missed events, as
2820 * described below.
2821 *
2822 * Return Value:
2823 * < 0 means an error occurred while requesting notification
2824 * == 0 means notification was requested successfully, and if
2825 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
2826 * were missed and it is safe to wait for another event. In
2827 * this case is it guaranteed that any work completions added
2828 * to the CQ since the last CQ poll will trigger a completion
2829 * notification event.
2830 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
2831 * in. It means that the consumer must poll the CQ again to
2832 * make sure it is empty to avoid missing an event because of a
2833 * race between requesting notification and an entry being
2834 * added to the CQ. This return value means it is possible
2835 * (but not guaranteed) that a work completion has been added
2836 * to the CQ since the last poll without triggering a
2837 * completion notification event.
2838 */
2839 static inline int ib_req_notify_cq(struct ib_cq *cq,
2840 enum ib_cq_notify_flags flags)
2841 {
2842 return cq->device->req_notify_cq(cq, flags);
2843 }
2844
2845 /**
2846 * ib_req_ncomp_notif - Request completion notification when there are
2847 * at least the specified number of unreaped completions on the CQ.
2848 * @cq: The CQ to generate an event for.
2849 * @wc_cnt: The number of unreaped completions that should be on the
2850 * CQ before an event is generated.
2851 */
2852 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
2853 {
2854 return cq->device->req_ncomp_notif ?
2855 cq->device->req_ncomp_notif(cq, wc_cnt) :
2856 -ENOSYS;
2857 }
2858
2859 /**
2860 * ib_get_dma_mr - Returns a memory region for system memory that is
2861 * usable for DMA.
2862 * @pd: The protection domain associated with the memory region.
2863 * @mr_access_flags: Specifies the memory access rights.
2864 *
2865 * Note that the ib_dma_*() functions defined below must be used
2866 * to create/destroy addresses used with the Lkey or Rkey returned
2867 * by ib_get_dma_mr().
2868 */
2869 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
2870
2871 /**
2872 * ib_dma_mapping_error - check a DMA addr for error
2873 * @dev: The device for which the dma_addr was created
2874 * @dma_addr: The DMA address to check
2875 */
2876 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
2877 {
2878 if (dev->dma_ops)
2879 return dev->dma_ops->mapping_error(dev, dma_addr);
2880 return dma_mapping_error(dev->dma_device, dma_addr);
2881 }
2882
2883 /**
2884 * ib_dma_map_single - Map a kernel virtual address to DMA address
2885 * @dev: The device for which the dma_addr is to be created
2886 * @cpu_addr: The kernel virtual address
2887 * @size: The size of the region in bytes
2888 * @direction: The direction of the DMA
2889 */
2890 static inline u64 ib_dma_map_single(struct ib_device *dev,
2891 void *cpu_addr, size_t size,
2892 enum dma_data_direction direction)
2893 {
2894 if (dev->dma_ops)
2895 return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
2896 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
2897 }
2898
2899 /**
2900 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
2901 * @dev: The device for which the DMA address was created
2902 * @addr: The DMA address
2903 * @size: The size of the region in bytes
2904 * @direction: The direction of the DMA
2905 */
2906 static inline void ib_dma_unmap_single(struct ib_device *dev,
2907 u64 addr, size_t size,
2908 enum dma_data_direction direction)
2909 {
2910 if (dev->dma_ops)
2911 dev->dma_ops->unmap_single(dev, addr, size, direction);
2912 else
2913 dma_unmap_single(dev->dma_device, addr, size, direction);
2914 }
2915
2916 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
2917 void *cpu_addr, size_t size,
2918 enum dma_data_direction direction,
2919 unsigned long dma_attrs)
2920 {
2921 return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
2922 direction, dma_attrs);
2923 }
2924
2925 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
2926 u64 addr, size_t size,
2927 enum dma_data_direction direction,
2928 unsigned long dma_attrs)
2929 {
2930 return dma_unmap_single_attrs(dev->dma_device, addr, size,
2931 direction, dma_attrs);
2932 }
2933
2934 /**
2935 * ib_dma_map_page - Map a physical page to DMA address
2936 * @dev: The device for which the dma_addr is to be created
2937 * @page: The page to be mapped
2938 * @offset: The offset within the page
2939 * @size: The size of the region in bytes
2940 * @direction: The direction of the DMA
2941 */
2942 static inline u64 ib_dma_map_page(struct ib_device *dev,
2943 struct page *page,
2944 unsigned long offset,
2945 size_t size,
2946 enum dma_data_direction direction)
2947 {
2948 if (dev->dma_ops)
2949 return dev->dma_ops->map_page(dev, page, offset, size, direction);
2950 return dma_map_page(dev->dma_device, page, offset, size, direction);
2951 }
2952
2953 /**
2954 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
2955 * @dev: The device for which the DMA address was created
2956 * @addr: The DMA address
2957 * @size: The size of the region in bytes
2958 * @direction: The direction of the DMA
2959 */
2960 static inline void ib_dma_unmap_page(struct ib_device *dev,
2961 u64 addr, size_t size,
2962 enum dma_data_direction direction)
2963 {
2964 if (dev->dma_ops)
2965 dev->dma_ops->unmap_page(dev, addr, size, direction);
2966 else
2967 dma_unmap_page(dev->dma_device, addr, size, direction);
2968 }
2969
2970 /**
2971 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
2972 * @dev: The device for which the DMA addresses are to be created
2973 * @sg: The array of scatter/gather entries
2974 * @nents: The number of scatter/gather entries
2975 * @direction: The direction of the DMA
2976 */
2977 static inline int ib_dma_map_sg(struct ib_device *dev,
2978 struct scatterlist *sg, int nents,
2979 enum dma_data_direction direction)
2980 {
2981 if (dev->dma_ops)
2982 return dev->dma_ops->map_sg(dev, sg, nents, direction);
2983 return dma_map_sg(dev->dma_device, sg, nents, direction);
2984 }
2985
2986 /**
2987 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
2988 * @dev: The device for which the DMA addresses were created
2989 * @sg: The array of scatter/gather entries
2990 * @nents: The number of scatter/gather entries
2991 * @direction: The direction of the DMA
2992 */
2993 static inline void ib_dma_unmap_sg(struct ib_device *dev,
2994 struct scatterlist *sg, int nents,
2995 enum dma_data_direction direction)
2996 {
2997 if (dev->dma_ops)
2998 dev->dma_ops->unmap_sg(dev, sg, nents, direction);
2999 else
3000 dma_unmap_sg(dev->dma_device, sg, nents, direction);
3001 }
3002
3003 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3004 struct scatterlist *sg, int nents,
3005 enum dma_data_direction direction,
3006 unsigned long dma_attrs)
3007 {
3008 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3009 dma_attrs);
3010 }
3011
3012 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3013 struct scatterlist *sg, int nents,
3014 enum dma_data_direction direction,
3015 unsigned long dma_attrs)
3016 {
3017 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
3018 }
3019 /**
3020 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
3021 * @dev: The device for which the DMA addresses were created
3022 * @sg: The scatter/gather entry
3023 *
3024 * Note: this function is obsolete. To do: change all occurrences of
3025 * ib_sg_dma_address() into sg_dma_address().
3026 */
3027 static inline u64 ib_sg_dma_address(struct ib_device *dev,
3028 struct scatterlist *sg)
3029 {
3030 return sg_dma_address(sg);
3031 }
3032
3033 /**
3034 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
3035 * @dev: The device for which the DMA addresses were created
3036 * @sg: The scatter/gather entry
3037 *
3038 * Note: this function is obsolete. To do: change all occurrences of
3039 * ib_sg_dma_len() into sg_dma_len().
3040 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	/* Obsolete shim: callers should migrate to sg_dma_len(). */
	unsigned int dma_len = sg_dma_len(sg);

	return dma_len;
}
3046
3047 /**
3048 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3049 * @dev: The device for which the DMA address was created
3050 * @addr: The DMA address
3051 * @size: The size of the region in bytes
3052 * @dir: The direction of the DMA
3053 */
3054 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3055 u64 addr,
3056 size_t size,
3057 enum dma_data_direction dir)
3058 {
3059 if (dev->dma_ops)
3060 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
3061 else
3062 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3063 }
3064
3065 /**
3066 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
3067 * @dev: The device for which the DMA address was created
3068 * @addr: The DMA address
3069 * @size: The size of the region in bytes
3070 * @dir: The direction of the DMA
3071 */
3072 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3073 u64 addr,
3074 size_t size,
3075 enum dma_data_direction dir)
3076 {
3077 if (dev->dma_ops)
3078 dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
3079 else
3080 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3081 }
3082
3083 /**
3084 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
3085 * @dev: The device for which the DMA address is requested
3086 * @size: The size of the region to allocate in bytes
3087 * @dma_handle: A pointer for returning the DMA address of the region
3088 * @flag: memory allocator flags
3089 */
3090 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3091 size_t size,
3092 u64 *dma_handle,
3093 gfp_t flag)
3094 {
3095 if (dev->dma_ops)
3096 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
3097 else {
3098 dma_addr_t handle;
3099 void *ret;
3100
3101 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
3102 *dma_handle = handle;
3103 return ret;
3104 }
3105 }
3106
3107 /**
3108 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
3109 * @dev: The device for which the DMA addresses were allocated
3110 * @size: The size of the region
3111 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
3112 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
3113 */
3114 static inline void ib_dma_free_coherent(struct ib_device *dev,
3115 size_t size, void *cpu_addr,
3116 u64 dma_handle)
3117 {
3118 if (dev->dma_ops)
3119 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
3120 else
3121 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3122 }
3123
3124 /**
3125 * ib_dereg_mr - Deregisters a memory region and removes it from the
3126 * HCA translation table.
3127 * @mr: The memory region to deregister.
3128 *
3129 * This function can fail, if the memory region has memory windows bound to it.
3130 */
3131 int ib_dereg_mr(struct ib_mr *mr);
3132
3133 struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
3134 enum ib_mr_type mr_type,
3135 u32 max_num_sg);
3136
3137 /**
3138 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3139 * R_Key and L_Key.
3140 * @mr - struct ib_mr pointer to be updated.
3141 * @newkey - new key to be used.
3142 */
3143 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3144 {
3145 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3146 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3147 }
3148
3149 /**
3150 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3151 * for calculating a new rkey for type 2 memory windows.
3152 * @rkey - the rkey to increment.
3153 */
3154 static inline u32 ib_inc_rkey(u32 rkey)
3155 {
3156 const u32 mask = 0x000000ff;
3157 return ((rkey + 1) & mask) | (rkey & ~mask);
3158 }
3159
3160 /**
3161 * ib_alloc_fmr - Allocates a unmapped fast memory region.
3162 * @pd: The protection domain associated with the unmapped region.
3163 * @mr_access_flags: Specifies the memory access rights.
3164 * @fmr_attr: Attributes of the unmapped region.
3165 *
3166 * A fast memory region must be mapped before it can be used as part of
3167 * a work request.
3168 */
3169 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
3170 int mr_access_flags,
3171 struct ib_fmr_attr *fmr_attr);
3172
3173 /**
3174 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3175 * @fmr: The fast memory region to associate with the pages.
3176 * @page_list: An array of physical pages to map to the fast memory region.
3177 * @list_len: The number of pages in page_list.
3178 * @iova: The I/O virtual address to use with the mapped region.
3179 */
3180 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
3181 u64 *page_list, int list_len,
3182 u64 iova)
3183 {
3184 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
3185 }
3186
3187 /**
3188 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3189 * @fmr_list: A linked list of fast memory regions to unmap.
3190 */
3191 int ib_unmap_fmr(struct list_head *fmr_list);
3192
3193 /**
3194 * ib_dealloc_fmr - Deallocates a fast memory region.
3195 * @fmr: The fast memory region to deallocate.
3196 */
3197 int ib_dealloc_fmr(struct ib_fmr *fmr);
3198
3199 /**
3200 * ib_attach_mcast - Attaches the specified QP to a multicast group.
3201 * @qp: QP to attach to the multicast group. The QP must be type
3202 * IB_QPT_UD.
3203 * @gid: Multicast group GID.
3204 * @lid: Multicast group LID in host byte order.
3205 *
3206 * In order to send and receive multicast packets, subnet
3207 * administration must have created the multicast group and configured
3208 * the fabric appropriately. The port associated with the specified
3209 * QP must also be a member of the multicast group.
3210 */
3211 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3212
3213 /**
3214 * ib_detach_mcast - Detaches the specified QP from a multicast group.
3215 * @qp: QP to detach from the multicast group.
3216 * @gid: Multicast group GID.
3217 * @lid: Multicast group LID in host byte order.
3218 */
3219 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3220
3221 /**
3222 * ib_alloc_xrcd - Allocates an XRC domain.
3223 * @device: The device on which to allocate the XRC domain.
3224 */
3225 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
3226
3227 /**
3228 * ib_dealloc_xrcd - Deallocates an XRC domain.
3229 * @xrcd: The XRC domain to deallocate.
3230 */
3231 int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3232
3233 struct ib_flow *ib_create_flow(struct ib_qp *qp,
3234 struct ib_flow_attr *flow_attr, int domain);
3235 int ib_destroy_flow(struct ib_flow *flow_id);
3236
3237 static inline int ib_check_mr_access(int flags)
3238 {
3239 /*
3240 * Local write permission is required if remote write or
3241 * remote atomic permission is also requested.
3242 */
3243 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3244 !(flags & IB_ACCESS_LOCAL_WRITE))
3245 return -EINVAL;
3246
3247 return 0;
3248 }
3249
3250 /**
3251 * ib_check_mr_status: lightweight check of MR status.
3252 * This routine may provide status checks on a selected
3253 * ib_mr. first use is for signature status check.
3254 *
3255 * @mr: A memory region.
3256 * @check_mask: Bitmask of which checks to perform from
3257 * ib_mr_status_check enumeration.
3258 * @mr_status: The container of relevant status checks.
3259 * failed checks will be indicated in the status bitmask
3260 * and the relevant info shall be in the error item.
3261 */
3262 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3263 struct ib_mr_status *mr_status);
3264
3265 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3266 u16 pkey, const union ib_gid *gid,
3267 const struct sockaddr *addr);
3268 struct ib_wq *ib_create_wq(struct ib_pd *pd,
3269 struct ib_wq_init_attr *init_attr);
3270 int ib_destroy_wq(struct ib_wq *wq);
3271 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
3272 u32 wq_attr_mask);
3273 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
3274 struct ib_rwq_ind_table_init_attr*
3275 wq_ind_table_init_attr);
3276 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
3277
3278 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3279 unsigned int *sg_offset, unsigned int page_size);
3280
3281 static inline int
3282 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3283 unsigned int *sg_offset, unsigned int page_size)
3284 {
3285 int n;
3286
3287 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
3288 mr->iova = 0;
3289
3290 return n;
3291 }
3292
3293 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
3294 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
3295
3296 void ib_drain_rq(struct ib_qp *qp);
3297 void ib_drain_sq(struct ib_qp *qp);
3298 void ib_drain_qp(struct ib_qp *qp);
3299 #endif /* IB_VERBS_H */ 1 #include <linux/module.h>
2 #include <linux/list.h>
3 #include "iscsi_target_core.h"
4
/*
 * Callback vector registered by an iSCSI transport implementation.
 * The target core dispatches connection, login and command events to
 * the transport through these hooks.
 */
struct iscsit_transport {
#define ISCSIT_TRANSPORT_NAME	16
	char name[ISCSIT_TRANSPORT_NAME];	/* human-readable transport name */
	int transport_type;	/* transport type id -- presumably an iscsit_transport_type value, TODO confirm */
	bool rdma_shutdown;
	int priv_size;		/* size of per-command private data; see iscsit_priv_cmd() */
	struct module *owner;	/* module whose refcount pins this transport */
	struct list_head t_node;	/* linkage in the registered-transport list */
	/* Listener / connection lifecycle hooks. */
	int (*iscsit_setup_np)(struct iscsi_np *, struct sockaddr_storage *);
	int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
	void (*iscsit_free_np)(struct iscsi_np *);
	void (*iscsit_wait_conn)(struct iscsi_conn *);
	void (*iscsit_free_conn)(struct iscsi_conn *);
	/* Login-phase I/O hooks. */
	int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
	int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
	/* Full-feature-phase command processing hooks. */
	int (*iscsit_immediate_queue)(struct iscsi_conn *, struct iscsi_cmd *, int);
	int (*iscsit_response_queue)(struct iscsi_conn *, struct iscsi_cmd *, int);
	int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool);
	int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
	int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
	void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *);
	int (*iscsit_xmit_pdu)(struct iscsi_conn *, struct iscsi_cmd *,
			       struct iscsi_datain_req *, const void *, u32);
	void (*iscsit_release_cmd)(struct iscsi_conn *, struct iscsi_cmd *);
	void (*iscsit_get_rx_pdu)(struct iscsi_conn *);
	int (*iscsit_validate_params)(struct iscsi_conn *);
	void (*iscsit_get_r2t_ttt)(struct iscsi_conn *, struct iscsi_cmd *,
				   struct iscsi_r2t *);
	enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *);
};
35
36 static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
37 {
38 return (void *)(cmd + 1);
39 }
40
41 /*
42 * From iscsi_target_transport.c
43 */
44
45 extern int iscsit_register_transport(struct iscsit_transport *);
46 extern void iscsit_unregister_transport(struct iscsit_transport *);
47 extern struct iscsit_transport *iscsit_get_transport(int);
48 extern void iscsit_put_transport(struct iscsit_transport *);
49
50 /*
51 * From iscsi_target.c
52 */
53 extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
54 unsigned char *);
55 extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
56 extern int iscsit_process_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
57 struct iscsi_scsi_req *);
58 extern int iscsit_check_dataout_hdr(struct iscsi_conn *, unsigned char *,
59 struct iscsi_cmd **);
60 extern int iscsit_check_dataout_payload(struct iscsi_cmd *, struct iscsi_data *,
61 bool);
62 extern int iscsit_setup_nop_out(struct iscsi_conn *, struct iscsi_cmd *,
63 struct iscsi_nopout *);
64 extern int iscsit_process_nop_out(struct iscsi_conn *, struct iscsi_cmd *,
65 struct iscsi_nopout *);
66 extern int iscsit_handle_logout_cmd(struct iscsi_conn *, struct iscsi_cmd *,
67 unsigned char *);
68 extern int iscsit_handle_task_mgt_cmd(struct iscsi_conn *, struct iscsi_cmd *,
69 unsigned char *);
70 extern int iscsit_setup_text_cmd(struct iscsi_conn *, struct iscsi_cmd *,
71 struct iscsi_text *);
72 extern int iscsit_process_text_cmd(struct iscsi_conn *, struct iscsi_cmd *,
73 struct iscsi_text *);
74 extern void iscsit_build_rsp_pdu(struct iscsi_cmd *, struct iscsi_conn *,
75 bool, struct iscsi_scsi_rsp *);
76 extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *,
77 struct iscsi_nopin *, bool);
78 extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *,
79 struct iscsi_tm_rsp *);
80 extern int iscsit_build_text_rsp(struct iscsi_cmd *, struct iscsi_conn *,
81 struct iscsi_text_rsp *,
82 enum iscsit_transport_type);
83 extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
84 struct iscsi_reject *);
85 extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
86 struct iscsi_logout_rsp *);
87 extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
88 extern int iscsit_queue_rsp(struct iscsi_conn *, struct iscsi_cmd *);
89 extern void iscsit_aborted_task(struct iscsi_conn *, struct iscsi_cmd *);
90 extern int iscsit_add_reject(struct iscsi_conn *, u8, unsigned char *);
91 extern int iscsit_reject_cmd(struct iscsi_cmd *, u8, unsigned char *);
92 extern int iscsit_handle_snack(struct iscsi_conn *, unsigned char *);
93 extern void iscsit_build_datain_pdu(struct iscsi_cmd *, struct iscsi_conn *,
94 struct iscsi_datain *,
95 struct iscsi_data_rsp *, bool);
96 extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *,
97 bool);
98 extern int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
99 extern int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
100 /*
101 * From iscsi_target_device.c
102 */
103 extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
104 /*
105 * From iscsi_target_erl0.c
106 */
107 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
108 /*
109 * From iscsi_target_erl1.c
110 */
111 extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
112
113 /*
114 * From iscsi_target_tmr.c
115 */
116 extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
117
118 /*
119 * From iscsi_target_util.c
120 */
121 extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
122 extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
123 unsigned char *, __be32);
124 extern void iscsit_release_cmd(struct iscsi_cmd *);
125 extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
126 extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *,
127 struct iscsi_conn *, u8);
128
129 /*
130 * From iscsi_target_nego.c
131 */
132 extern int iscsi_target_check_login_request(struct iscsi_conn *,
133 struct iscsi_login *);
134
135 /*
136 * From iscsi_target_login.c
137 */
138 extern __printf(2, 3) int iscsi_change_param_sprintf(
139 struct iscsi_conn *, const char *, ...);
140
141 /*
142 * From iscsi_target_parameters.c
143 */
144 extern struct iscsi_param *iscsi_find_param_from_key(
145 char *, struct iscsi_param_list *); 1 #ifndef _UAPI_LINUX_SWAB_H
2 #define _UAPI_LINUX_SWAB_H
3
4 #include <linux/types.h>
5 #include <linux/compiler.h>
6 #include <asm/swab.h>
7
/*
 * Casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
 */

/* Byte-swap a 16-bit constant expression. */
#define ___constant_swab16(x) ((__u16)(				\
	(((__u16)(x) & (__u16)0x00ffU) << 8) |			\
	(((__u16)(x) & (__u16)0xff00U) >> 8)))

/* Reverse the four bytes of a 32-bit constant expression. */
#define ___constant_swab32(x) ((__u32)(				\
	(((__u32)(x) & (__u32)0x000000ffUL) << 24) |		\
	(((__u32)(x) & (__u32)0x0000ff00UL) << 8) |		\
	(((__u32)(x) & (__u32)0x00ff0000UL) >> 8) |		\
	(((__u32)(x) & (__u32)0xff000000UL) >> 24)))

/* Reverse the eight bytes of a 64-bit constant expression. */
#define ___constant_swab64(x) ((__u64)(				\
	(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) |	\
	(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) |	\
	(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) |	\
	(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) |	\
	(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) |	\
	(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) |	\
	(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) |	\
	(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56)))

/* Swap the two 16-bit halfwords of a 32-bit constant expression. */
#define ___constant_swahw32(x) ((__u32)(			\
	(((__u32)(x) & (__u32)0x0000ffffUL) << 16) |		\
	(((__u32)(x) & (__u32)0xffff0000UL) >> 16)))

/* Swap the bytes within each 16-bit halfword of a 32-bit constant. */
#define ___constant_swahb32(x) ((__u32)(			\
	(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) |		\
	(((__u32)(x) & (__u32)0xff00ff00UL) >> 8)))
39
40 /*
41 * Implement the following as inlines, but define the interface using
42 * macros to allow constant folding when possible:
43 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
44 */
45
46 static inline __attribute_const__ __u16 __fswab16(__u16 val)
47 {
48 #if defined (__arch_swab16)
49 return __arch_swab16(val);
50 #else
51 return ___constant_swab16(val);
52 #endif
53 }
54
55 static inline __attribute_const__ __u32 __fswab32(__u32 val)
56 {
57 #if defined(__arch_swab32)
58 return __arch_swab32(val);
59 #else
60 return ___constant_swab32(val);
61 #endif
62 }
63
64 static inline __attribute_const__ __u64 __fswab64(__u64 val)
65 {
66 #if defined (__arch_swab64)
67 return __arch_swab64(val);
68 #elif defined(__SWAB_64_THRU_32__)
69 __u32 h = val >> 32;
70 __u32 l = val & ((1ULL << 32) - 1);
71 return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h)));
72 #else
73 return ___constant_swab64(val);
74 #endif
75 }
76
77 static inline __attribute_const__ __u32 __fswahw32(__u32 val)
78 {
79 #ifdef __arch_swahw32
80 return __arch_swahw32(val);
81 #else
82 return ___constant_swahw32(val);
83 #endif
84 }
85
86 static inline __attribute_const__ __u32 __fswahb32(__u32 val)
87 {
88 #ifdef __arch_swahb32
89 return __arch_swahb32(val);
90 #else
91 return ___constant_swahb32(val);
92 #endif
93 }
94
/**
 * __swab16 - return a byteswapped 16-bit value
 * @x: value to byteswap
 */
#ifdef __HAVE_BUILTIN_BSWAP16__
/* Compiler builtin: constant-folds and emits the optimal instruction. */
#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
#else
/* Fold constants at compile time; call the inline helper otherwise. */
#define __swab16(x)				\
	(__builtin_constant_p((__u16)(x)) ?	\
	___constant_swab16(x) :			\
	__fswab16(x))
#endif

/**
 * __swab32 - return a byteswapped 32-bit value
 * @x: value to byteswap
 */
#ifdef __HAVE_BUILTIN_BSWAP32__
/* Compiler builtin: constant-folds and emits the optimal instruction. */
#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
#else
/* Fold constants at compile time; call the inline helper otherwise. */
#define __swab32(x)				\
	(__builtin_constant_p((__u32)(x)) ?	\
	___constant_swab32(x) :			\
	__fswab32(x))
#endif

/**
 * __swab64 - return a byteswapped 64-bit value
 * @x: value to byteswap
 */
#ifdef __HAVE_BUILTIN_BSWAP64__
/* Compiler builtin: constant-folds and emits the optimal instruction. */
#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
#else
/* Fold constants at compile time; call the inline helper otherwise. */
#define __swab64(x)				\
	(__builtin_constant_p((__u64)(x)) ?	\
	___constant_swab64(x) :			\
	__fswab64(x))
#endif
133
/**
 * __swahw32 - return a word-swapped 32-bit value
 * @x: value to wordswap
 *
 * __swahw32(0x12340000) is 0x00001234
 */
/* Fold constants at compile time; call the inline helper otherwise. */
#define __swahw32(x)				\
	(__builtin_constant_p((__u32)(x)) ?	\
	___constant_swahw32(x) :		\
	__fswahw32(x))

/**
 * __swahb32 - return a high and low byte-swapped 32-bit value
 * @x: value to byteswap
 *
 * __swahb32(0x12345678) is 0x34127856
 */
/* Fold constants at compile time; call the inline helper otherwise. */
#define __swahb32(x)				\
	(__builtin_constant_p((__u32)(x)) ?	\
	___constant_swahb32(x) :		\
	__fswahb32(x))
155
/**
 * __swab16p - return a byteswapped 16-bit value from a pointer
 * @p: pointer to a naturally-aligned 16-bit value
 */
static __always_inline __u16 __swab16p(const __u16 *p)
{
#ifdef __arch_swab16p
	/* Architecture provides an optimized load-and-swap. */
	return __arch_swab16p(p);
#else
	/* Generic fallback: plain load, then swap. */
	return __swab16(*p);
#endif
}
168
/**
 * __swab32p - return a byteswapped 32-bit value from a pointer
 * @p: pointer to a naturally-aligned 32-bit value
 */
static __always_inline __u32 __swab32p(const __u32 *p)
{
#ifdef __arch_swab32p
	/* Architecture provides an optimized load-and-swap. */
	return __arch_swab32p(p);
#else
	/* Generic fallback: plain load, then swap. */
	return __swab32(*p);
#endif
}
181
/**
 * __swab64p - return a byteswapped 64-bit value from a pointer
 * @p: pointer to a naturally-aligned 64-bit value
 */
static __always_inline __u64 __swab64p(const __u64 *p)
{
#ifdef __arch_swab64p
	/* Architecture provides an optimized load-and-swap. */
	return __arch_swab64p(p);
#else
	/* Generic fallback: plain load, then swap. */
	return __swab64(*p);
#endif
}
194
/**
 * __swahw32p - return a wordswapped 32-bit value from a pointer
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahw32() for details of wordswapping.
 */
static inline __u32 __swahw32p(const __u32 *p)
{
#ifdef __arch_swahw32p
	/* Architecture provides an optimized load-and-swap. */
	return __arch_swahw32p(p);
#else
	/* Generic fallback: plain load, then swap. */
	return __swahw32(*p);
#endif
}
209
/**
 * __swahb32p - return a high and low byteswapped 32-bit value from a pointer
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahb32() for details of high/low byteswapping.
 */
static inline __u32 __swahb32p(const __u32 *p)
{
#ifdef __arch_swahb32p
	/* Architecture provides an optimized load-and-swap. */
	return __arch_swahb32p(p);
#else
	/* Generic fallback: plain load, then swap. */
	return __swahb32(*p);
#endif
}
224
/**
 * __swab16s - byteswap a 16-bit value in-place
 * @p: pointer to a naturally-aligned 16-bit value
 */
static inline void __swab16s(__u16 *p)
{
#ifdef __arch_swab16s
	/* Architecture provides an optimized in-place swap. */
	__arch_swab16s(p);
#else
	/* Generic fallback: swap via the pointer helper and store back. */
	*p = __swab16p(p);
#endif
}
/**
 * __swab32s - byteswap a 32-bit value in-place
 * @p: pointer to a naturally-aligned 32-bit value
 */
static __always_inline void __swab32s(__u32 *p)
{
#ifdef __arch_swab32s
	/* Architecture provides an optimized in-place swap. */
	__arch_swab32s(p);
#else
	/* Generic fallback: swap via the pointer helper and store back. */
	*p = __swab32p(p);
#endif
}
249
/**
 * __swab64s - byteswap a 64-bit value in-place
 * @p: pointer to a naturally-aligned 64-bit value
 */
static __always_inline void __swab64s(__u64 *p)
{
#ifdef __arch_swab64s
	/* Architecture provides an optimized in-place swap. */
	__arch_swab64s(p);
#else
	/* Generic fallback: swap via the pointer helper and store back. */
	*p = __swab64p(p);
#endif
}
262
/**
 * __swahw32s - wordswap a 32-bit value in-place
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahw32() for details of wordswapping
 */
static inline void __swahw32s(__u32 *p)
{
#ifdef __arch_swahw32s
	/* Architecture provides an optimized in-place swap. */
	__arch_swahw32s(p);
#else
	/* Generic fallback: swap via the pointer helper and store back. */
	*p = __swahw32p(p);
#endif
}
277
/**
 * __swahb32s - high and low byteswap a 32-bit value in-place
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahb32() for details of high and low byte swapping
 */
static inline void __swahb32s(__u32 *p)
{
#ifdef __arch_swahb32s
	/* Architecture provides an optimized in-place swap. */
	__arch_swahb32s(p);
#else
	/* Generic fallback: swap via the pointer helper and store back. */
	*p = __swahb32p(p);
#endif
}
292
293
294 #endif /* _UAPI_LINUX_SWAB_H */ |
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel rule.
Note that it may be a false positive, i.e. there may be no real error. Please analyze the given error trace and the related source code to determine whether your driver really contains an error.
The Error trace column shows the path on which the given rule is violated. You can expand or collapse certain entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu. You can also expand or collapse each particular entity by clicking on +/-. By hovering over some entities you can see tips. The error trace is also linked to the related source code: line numbers may be shown as links on the left, and clicking them opens the corresponding lines in the source code.
The Source code column shows the content of the files related to the error trace. It includes the source code of your driver (note that there are some LDV modifications at the end), kernel headers and the rule model. Tabs show the currently opened file and the other available files. By hovering over a tab you can see the full file name; clicking it shows that file's content.
Ядро | Модуль | Правило | Верификатор | Вердикт | Статус | Время создания | Описание проблемы |
linux-4.8-rc1.tar.xz | drivers/infiniband/ulp/isert/ib_isert.ko | 331_1a | CPAchecker | Bug | Fixed | 2016-10-22 01:05:07 | L0256 |
Комментарий
Reported: 22 Oct 2016
[В начало]