Online Linux Driver Verification Service (alpha)

Rule violation
Driver: xordev.tar.gz
Kernel: linux-2.6.37
Verification architecture: x86_64
Rule: Spinlocks lock/unlock
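The rule requires that a spinlock is acquired only while free and released only while held. To check it, the verifier replaces every spin_lock/spin_unlock family call with a model that tracks the lock state in a global variable. Condensed below from the model source in the "Source code" section (the variable is named ldv_lock here for brevity; in this run it is instantiated as ldv_lock_lock):

    /* Two-state spinlock model: 1 = free, 2 = locked. */
    int ldv_lock = 1;

    void ldv_spin_lock(spinlock_t *lock) {
        ldv_assert(ldv_lock == 1);   /* acquiring a held lock => rule violation */
        ldv_lock = 2;
    }

    void ldv_spin_unlock(spinlock_t *lock) {
        ldv_assert(ldv_lock != 1);   /* releasing a free lock => rule violation */
        ldv_lock = 1;
    }

ldv_assert() reports a rule violation when its condition is false; ldv_check_final_state() additionally requires the lock to be free when the module is unloaded.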
Error trace
-entry_point();
{
-__BLAST_initialize_/home/ldv/ldv-new/ldv-tools-inst/tmp/run/work/current--X--xordev.tar.gz_89--X--defaultlinux-2.6.37--X--39_7/linux-2.6.37/csd_deg_dscv/3/dscv_tempdir/dscv/rcv/39_7/main-ldv_main1_sequence_infinite_withcheck_stateful/preprocess/1-cpp/circ_dma_buf.c.common.i();
{
30 -CDB_SIZE = 4096;
ldv_lock_TEMPLATE = 1;
return 0;
}
-__BLAST_initialize_/home/ldv/ldv-new/ldv-tools-inst/tmp/run/work/current--X--xordev.tar.gz_89--X--defaultlinux-2.6.37--X--39_7/linux-2.6.37/csd_deg_dscv/3/dscv_tempdir/dscv/rcv/39_7/main-ldv_main1_sequence_infinite_withcheck_stateful/preprocess/1-cpp/drv_xordev.i();
{
38 -__mod_author38[ 0 ] = 97;
__mod_author38[ 1 ] = 117;
__mod_author38[ 2 ] = 116;
__mod_author38[ 3 ] = 104;
__mod_author38[ 4 ] = 111;
__mod_author38[ 5 ] = 114;
__mod_author38[ 6 ] = 61;
__mod_author38[ 7 ] = 77;
__mod_author38[ 8 ] = 105;
__mod_author38[ 9 ] = 99;
__mod_author38[ 10 ] = 104;
__mod_author38[ 11 ] = 97;
__mod_author38[ 12 ] = 108;
__mod_author38[ 13 ] = 32;
__mod_author38[ 14 ] = 77;
__mod_author38[ 15 ] = 97;
__mod_author38[ 16 ] = 114;
__mod_author38[ 17 ] = 115;
__mod_author38[ 18 ] = 99;
__mod_author38[ 19 ] = 104;
__mod_author38[ 20 ] = 97;
__mod_author38[ 21 ] = 108;
__mod_author38[ 22 ] = 108;
__mod_author38[ 23 ] = 0;
__mod_license39[ 0 ] = 108;
__mod_license39[ 1 ] = 105;
__mod_license39[ 2 ] = 99;
__mod_license39[ 3 ] = 101;
__mod_license39[ 4 ] = 110;
__mod_license39[ 5 ] = 115;
__mod_license39[ 6 ] = 101;
__mod_license39[ 7 ] = 61;
__mod_license39[ 8 ] = 71;
__mod_license39[ 9 ] = 80;
__mod_license39[ 10 ] = 76;
__mod_license39[ 11 ] = 0;
TYPES = 3;
REG_INTR_EN = 0;
REG_SRC1 = 4;
REG_SRC2 = 8;
REG_DST = 12;
REG_COUNT = 16;
REG_INTR_COUNT = 20;
IRQ_OFF = 0;
IRQ_ON = 1;
xordev_fops.owner = &(__this_module);
xordev_fops.llseek = 0;
xordev_fops.read = 0;
xordev_fops.write = 0;
xordev_fops.aio_read = 0;
xordev_fops.aio_write = 0;
xordev_fops.readdir = 0;
xordev_fops.poll = 0;
xordev_fops.unlocked_ioctl = 0;
xordev_fops.compat_ioctl = 0;
xordev_fops.mmap = 0;
xordev_fops.open = &(xordev_open);
xordev_fops.flush = 0;
xordev_fops.release = &(xordev_release);
xordev_fops.fsync = 0;
xordev_fops.aio_fsync = 0;
xordev_fops.fasync = 0;
xordev_fops.lock = 0;
xordev_fops.sendpage = 0;
xordev_fops.get_unmapped_area = 0;
xordev_fops.check_flags = 0;
xordev_fops.flock = 0;
xordev_fops.splice_write = 0;
xordev_fops.splice_read = 0;
xordev_fops.setlease = 0;
xordev_dst_fops.owner = &(__this_module);
xordev_dst_fops.llseek = 0;
xordev_dst_fops.read = &(xordev_read);
xordev_dst_fops.write = 0;
xordev_dst_fops.aio_read = 0;
xordev_dst_fops.aio_write = 0;
xordev_dst_fops.readdir = 0;
xordev_dst_fops.poll = 0;
xordev_dst_fops.unlocked_ioctl = 0;
xordev_dst_fops.compat_ioctl = 0;
xordev_dst_fops.mmap = 0;
xordev_dst_fops.open = &(xordev_open);
xordev_dst_fops.flush = 0;
xordev_dst_fops.release = &(xordev_release);
xordev_dst_fops.fsync = 0;
xordev_dst_fops.aio_fsync = 0;
xordev_dst_fops.fasync = 0;
xordev_dst_fops.lock = 0;
xordev_dst_fops.sendpage = 0;
xordev_dst_fops.get_unmapped_area = 0;
xordev_dst_fops.check_flags = 0;
xordev_dst_fops.flock = 0;
xordev_dst_fops.splice_write = 0;
xordev_dst_fops.splice_read = 0;
xordev_dst_fops.setlease = 0;
xordev_input_fops.owner = &(__this_module);
xordev_input_fops.llseek = 0;
xordev_input_fops.read = 0;
xordev_input_fops.write = &(xordev_write);
xordev_input_fops.aio_read = 0;
xordev_input_fops.aio_write = 0;
xordev_input_fops.readdir = 0;
xordev_input_fops.poll = 0;
xordev_input_fops.unlocked_ioctl = 0;
xordev_input_fops.compat_ioctl = 0;
xordev_input_fops.mmap = 0;
xordev_input_fops.open = &(xordev_open);
xordev_input_fops.flush = 0;
xordev_input_fops.release = &(xordev_release);
xordev_input_fops.fsync = 0;
xordev_input_fops.aio_fsync = 0;
xordev_input_fops.fasync = 0;
xordev_input_fops.lock = 0;
xordev_input_fops.sendpage = 0;
xordev_input_fops.get_unmapped_area = 0;
xordev_input_fops.check_flags = 0;
xordev_input_fops.flock = 0;
xordev_input_fops.splice_write = 0;
xordev_input_fops.splice_read = 0;
xordev_input_fops.setlease = 0;
xordev_id.vendor = 6900;
xordev_id.device = 4349;
xordev_id.subvendor = ~(0);
xordev_id.subdevice = ~(0);
xordev_id.class = 0;
xordev_id.class_mask = 0;
xordev_id.driver_data = 0;
xordev_driver.node.next = 0;
xordev_driver.node.prev = 0;
xordev_driver.name = "xordev_pci";
xordev_driver.id_table = &(xordev_id);
xordev_driver.probe = &(xordev_probe);
xordev_driver.remove = &(xordev_remove);
xordev_driver.suspend = 0;
xordev_driver.suspend_late = 0;
xordev_driver.resume_early = 0;
xordev_driver.resume = 0;
xordev_driver.shutdown = 0;
xordev_driver.err_handler = 0;
xordev_driver.driver.name = 0;
xordev_driver.driver.bus = 0;
xordev_driver.driver.owner = 0;
xordev_driver.driver.mod_name = 0;
xordev_driver.driver.suppress_bind_attrs = 0;
xordev_driver.driver.probe = 0;
xordev_driver.driver.remove = 0;
xordev_driver.driver.shutdown = 0;
xordev_driver.driver.suspend = 0;
xordev_driver.driver.resume = 0;
xordev_driver.driver.groups = 0;
xordev_driver.driver.pm = 0;
xordev_driver.driver.p = 0;
xordev_driver.dynids.lock.__annonCompField18.rlock.raw_lock.slock = 0;
xordev_driver.dynids.lock.__annonCompField18.rlock.magic = 0;
xordev_driver.dynids.lock.__annonCompField18.rlock.owner_cpu = 0;
xordev_driver.dynids.lock.__annonCompField18.rlock.owner = 0;
xordev_driver.dynids.list.next = 0;
xordev_driver.dynids.list.prev = 0;
return 0;
}
559 LDV_IN_INTERRUPT = 1;
568 -ldv_initialize_FOREACH();
{
-ldv_initialize_lock();
{
339 ldv_lock_lock = 1;
return 0;
}
return 0;
}
584 -tmp___7 = xordev_init_module();
{
458 i = 0;
458 assert(i < 64);
459 -devices[ i ].present = 0;
devices[ i ].number = i;
i = i + 1;
458 assert(i < 64);
459 -devices[ i ].present = 0;
devices[ i ].number = i;
i = i + 1;
... (the unrolled loop body above repeats identically for i = 2 through 63; 62 further iterations elided) ...
458 assert(i >= 64);
464 -xordev_major = register_chrdev(0 /* major */, "xordev_char" /* name */, &(xordev_fops) /* fops */);
{
2068 tmp = __register_chrdev(major, 0, 256, name, fops) { /* The function body is undefined. */ };
2068 return tmp;
}
465 assert(xordev_major >= 0);
469 tmp___7 = __class_create(&(__this_module), "xordev_class", &(__key___7)) { /* The function body is undefined. */ };
469 xordev_class = tmp___7;
470 -tmp___8 = IS_ERR(xordev_class /* ptr */);
{
470 assert(ptr < -4095);
470 __cil_tmp3 = 0;
34 -tmp = __builtin_expect(__cil_tmp3 /* val */, 0 /* res */);
{
136 return val;
}
34 return tmp;
}
470 assert(tmp___8 == 0);
474 ret_value = __pci_register_driver(&(xordev_driver), &(__this_module), "xordev") { /* The function body is undefined. */ };
475 assert(ret_value >= 0);
478 __retres5 = 0;
454 return __retres5;
}
584 assert(tmp___7 == 0);
589 tmp___9 = nondet_int() { /* The function body is undefined. */ };
589 assert(tmp___9 != 0);
592 tmp___8 = nondet_int() { /* The function body is undefined. */ };
594 assert(tmp___8 == 0);
597 LDV_IN_INTERRUPT = 2;
612 -xordev_irq_handler(var_xordev_irq_handler_12_p0 /* irq */, var_xordev_irq_handler_12_p1 /* device */);
{
311 -tmp___7 = pci_get_drvdata(device /* pdev */);
{
1255 tmp = dev_get_drvdata(&(pdev)->dev) { /* The function body is undefined. */ };
1255 return tmp;
}
311 xordev = tmp___7;
315 -ldv_spin_lock_irqsave_lock(&(xordev)->lock /* lock */, irq_flags /* flags */);
{
203 assert(ldv_lock_lock == 1);
203 ldv_lock_lock = 2;
return 0;
}
316 tmp___8 = ioread32(*(xordev).iomem + REG_INTR_EN) { /* The function body is undefined. */ };
316 assert(tmp___8 == IRQ_ON);
320 tmp___9 = ioread32(*(xordev).iomem + REG_COUNT) { /* The function body is undefined. */ };
320 count = tmp___9;
321 tmp___10 = ioread32(*(xordev).iomem + REG_INTR_COUNT) { /* The function body is undefined. */ };
321 intr_count = tmp___10;
322 assert(count <= intr_count);
328 -update_buffer(*(xordev).src1 /* chardev */);
{
107 buffer = *(chardev).buffer;
111 -ldv_spin_lock_irqsave_lock(&(buffer)->lock /* lock */, irq_flags /* flags */);
{
203 assert(ldv_lock_lock != 1);
203 -ldv_blast_assert();
{
}
}
}
}
}
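The violated assertion is the model check ldv_assert(ldv_lock_lock == 1) inside ldv_spin_lock_irqsave_lock(). In short: xordev_irq_handler() acquires the device lock at source line 315 (the model state ldv_lock_lock goes 1 -> 2), then calls update_buffer() for the src1 character device, which tries to acquire the buffer lock at source line 111 while the model is still in the locked state, so the assertion fails and ldv_blast_assert() terminates the trace. A sketch of the flagged nesting, paraphrased from the driver source below (not a verbatim excerpt):

    static irqreturn_t xordev_irq_handler(int irq, void *device) {
        ...
        spin_lock_irqsave(&xordev->lock, irq_flags);   /* model: ldv_lock_lock = 2 */
        ...
        update_buffer(xordev->src1);                   /* called with xordev->lock held */
        ...
    }

    static int update_buffer(struct char_device *chardev) {
        struct circ_dma_buf *buffer = chardev->buffer;
        ...
        spin_lock_irqsave(&buffer->lock, irq_flags);   /* model: assert(ldv_lock_lock == 1) fails */
        ...
    }

Note that xordev->lock and buffer->lock are distinct spinlock_t objects; the model nevertheless folds them into the single state variable ldv_lock_lock because both fields are named "lock", so it cannot distinguish a genuine double acquisition from the nesting of two different locks.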
Source code
File: circ_dma_buf.c.common.i (driver file circ_dma_buf.c with the spinlock model appended)

1    #include <linux/spinlock.h>
2    void ldv_spin_lock_irqsave(spinlock_t *lock, unsigned long flags);
3    void ldv_spin_lock_nested(spinlock_t *lock, int subclass);
4    void ldv_spin_lock_nest_lock(spinlock_t *lock, void *map);
5    void ldv_spin_lock_irqsave_nested(spinlock_t *lock, int subclass);
6    int ldv_spin_trylock_irqsave(spinlock_t *lock, unsigned long flags);
7    void ldv_spin_lock(spinlock_t *lock);
8    void ldv_spin_lock_bh(spinlock_t *lock);
9    void ldv_spin_lock_irq(spinlock_t *lock);
10   int ldv_spin_trylock(spinlock_t *lock);
11   int ldv_spin_trylock_bh(spinlock_t *lock);
12   int ldv_spin_trylock_irq(spinlock_t *lock);
13   void ldv_spin_unlock(spinlock_t *lock);
14   void ldv_spin_unlock_bh(spinlock_t *lock);
15   void ldv_spin_unlock_irq(spinlock_t *lock);
16   void ldv_spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
17   void ldv_spin_unlock_wait(spinlock_t *lock);
18   int ldv_spin_is_locked(spinlock_t *lock);
19   int ldv_spin_is_contended(spinlock_t *lock);
20   int ldv_spin_can_lock(spinlock_t *lock);
21   int ldv_atomic_dec_and_lock(spinlock_t *lock, atomic_t *atomic);
22   #define ldv_atomic_dec_and_lock_macro(atomic,lock) ldv_atomic_dec_and_lock(lock,atomic)
23   // Michal Marschall
24   // Index number: 291693

26   #include "circ_dma_buf.h"
27   #include <linux/kernel.h>
28   #include <linux/pci.h>

30   const int CDB_SIZE = 4096;

32   struct circ_dma_buf *cdb_alloc(struct device *device) {
33       struct circ_dma_buf *buffer = kmalloc(sizeof(struct circ_dma_buf), GFP_KERNEL);
34       if(buffer == NULL)
35           return NULL;
36       buffer->data = dma_alloc_coherent(device, CDB_SIZE, &buffer->handle, GFP_KERNEL);
37       if(buffer->data == NULL) {
38           kfree(buffer); // avoid memory leak
39           return NULL;
40       }
41       buffer->start = 0;
42       buffer->fill = 0;
43       buffer->device = device;
44       spin_lock_init(&buffer->lock);
45       return buffer;
46   }

48   void cdb_free(struct circ_dma_buf *buffer) {
49       dma_free_coherent(buffer->device, CDB_SIZE, buffer->data, buffer->handle);
50       kfree(buffer);
51   }

53   int cdb_inc_begin(struct circ_dma_buf *buffer, int value) {
54       if(value > buffer->fill)
55           value = buffer->fill;
56       buffer->fill -= value;
57       buffer->start = (buffer->start + value) % CDB_SIZE;
58       return value;
59   }

61   int cdb_inc_end(struct circ_dma_buf *buffer, int value) {
62       if(value > CDB_SIZE - buffer->fill)
63           value = CDB_SIZE - buffer->fill;
64       buffer->fill += value;
65       return value;
66   }

68   int cdb_copy_from(struct circ_dma_buf *source, void *dest, int count, int (*copy_func)(void *, void *, int)) {
69       int first, copied1, copied2;
70       unsigned long irq_flags;

72       ldv_spin_lock_irqsave(&source->lock, irq_flags);
73       if(count > source->fill)
74           count = source->fill;

76       // copy first part:
77       if(count > CDB_SIZE - source->start)
78           first = CDB_SIZE - source->start;
79       else
80           first = count;
81       ldv_spin_unlock_irqrestore(&source->lock, irq_flags);
82       copied1 = copy_func(source->data + source->start, dest, first);
83       ldv_spin_lock_irqsave(&source->lock, irq_flags);
84       cdb_inc_begin(source, copied1);
85       ldv_spin_unlock_irqrestore(&source->lock, irq_flags);
86       if(first == count || copied1 < first) // copied everything or error
87           return copied1;

89       // copy second part:
90       copied2 = copy_func(source->data, dest + first, count - first);
91       cdb_inc_begin(source, copied2);

93       return copied1 + copied2;
94   }

96   int cdb_copy_to(void *source, struct circ_dma_buf *dest, int count, int (*copy_func)(void *, void *, int)) {
97       int end, first, copied1, copied2;
98       unsigned long irq_flags;

100      ldv_spin_lock_irqsave(&dest->lock, irq_flags);
101      if(count > CDB_SIZE - dest->fill)
102          count = CDB_SIZE - dest->fill;

104      // copy first part:
105      end = (dest->start + dest->fill) % CDB_SIZE;
106      if(end + count > CDB_SIZE)
107          first = CDB_SIZE - end;
108      else
109          first = count;
110      ldv_spin_unlock_irqrestore(&dest->lock, irq_flags);
111      copied1 = copy_func(source, dest->data + end, first);
112      ldv_spin_lock_irqsave(&dest->lock, irq_flags);
113      cdb_inc_end(dest, copied1);
114      ldv_spin_unlock_irqrestore(&dest->lock, irq_flags);
115      if(first == count || copied1 < first) // copied everything or error
116          return copied1;

118      // copy second part:
119      copied2 = copy_func(source + first, dest->data, count - first);
120      cdb_inc_end(dest, copied2);

122      return copied1 + copied2;
123  }
124  /* LDV_COMMENT_BEGIN_MODEL */
125  #include <linux/kernel.h>
126  #include <linux/spinlock.h>

128  /*
129      CONFIG_DEBUG_SPINLOCK should be true
130      make menuconfig
131      Kernel hacking->Kernel debugging->Spinlock and rw-lock debugging: basic checks
132  */

134  /* the function works only without aspectator */
135  long __builtin_expect(long val, long res) {
136      return val;
137  }

139  #include "engine-blast.h"

142  /* Need this because rerouter is buggy!.. */
143  extern int ldv_lock_TEMPLATE;
144  /* Now the actual variable goes... */
145  int ldv_lock_TEMPLATE = 1;

147  #define __ldv_spin_lock() \
148  do {\
149      /* LDV_COMMENT_ASSERT Lock should be in a free state*/\
150      ldv_assert(ldv_lock_TEMPLATE==1);\
151      /* LDV_COMMENT_CHANGE_STATE Goto locked state*/\
152      ldv_lock_TEMPLATE=2;\
153  } while(0)

155  #define __ldv_spin_unlock() \
156  do {\
157      /* LDV_COMMENT_ASSERT Lock should be in a locked state*/\
158      ldv_assert(ldv_lock_TEMPLATE!=1);\
159      /* LDV_COMMENT_CHANGE_STATE Goto free state*/\
160      ldv_lock_TEMPLATE=1;\
161  } while(0)

163  #define __ldv_spin_trylock() \
164  do {\
165      int is_lock_held_by_another_thread;\
166      /* LDV_COMMENT_OTHER Construct an arbitrary flag*/\
167      is_lock_held_by_another_thread = ldv_undef_int();\
168      /* LDV_COMMENT_OTHER If lock is free choose arbitrary action*/\
169      if(ldv_lock_TEMPLATE==1 && is_lock_held_by_another_thread)\
170      {\
171          /* LDV_COMMENT_CHANGE_STATE Goto locked state*/\
172          ldv_lock_TEMPLATE=2;\
173          /* LDV_COMMENT_RETURN The lock is acquired*/\
174          return 1;\
175      }\
176      else\
177      {\
178          /* LDV_COMMENT_RETURN The lock is not acquired*/\
179          return 0;\
180      }\
181  } while(0)

183  #define __ldv_spin_checklocked(free,busy) \
184  do {\
185      int is_lock_held_by_another_thread;\
186      /* LDV_COMMENT_OTHER Construct an arbitrary flag*/\
187      is_lock_held_by_another_thread = ldv_undef_int();\
188      /* LDV_COMMENT_OTHER If lock is free choose arbitrary action*/\
189      if(ldv_lock_TEMPLATE==1 && is_lock_held_by_another_thread)\
190      {\
191          /* LDV_COMMENT_RETURN The lock is free*/\
192          return free;\
193      }\
194      else\
195      {\
196          /* LDV_COMMENT_RETURN The lock is not free*/\
197          return busy;\
198      }\
199  } while(0)

201  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_irqsave(?!_nested)') Acquires the lock and checks for double spin lock*/
202  void ldv_spin_lock_irqsave_TEMPLATE(spinlock_t *lock, unsigned long flags) {
203      __ldv_spin_lock();
204  }

206  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_nested') Acquires the lock and checks for double spin lock*/
207  void ldv_spin_lock_nested_TEMPLATE(spinlock_t *lock, int subclass) {
208      __ldv_spin_lock();
209  }

211  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_nest_lock') Acquires the lock and checks for double spin lock*/
212  void ldv_spin_lock_nest_lock_TEMPLATE(spinlock_t *lock, void *map) {
213      __ldv_spin_lock();
214  }

216  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_irqsave_nested') Acquires the lock and checks for double spin lock*/
217  void ldv_spin_lock_irqsave_nested_TEMPLATE(spinlock_t *lock, unsigned long flags, int subclass) {
218      __ldv_spin_lock();
219  }

221  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_irqsave(?!_nested)') Tries to acquire the lock and returns one if successful*/
222  int ldv_spin_trylock_irqsave_TEMPLATE(spinlock_t *lock, unsigned long flags) {
223      __ldv_spin_trylock();
224  }

226  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock(?!_bh|_irq|_irqsave|_nested|_irqsave_nested|_nest_lock)') Acquires the lock and checks for double spin lock*/
227  void ldv_spin_lock_TEMPLATE(spinlock_t *lock) {
228      __ldv_spin_lock();
229  }

231  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_bh') Acquires the lock and checks for double spin lock*/
232  void ldv_spin_lock_bh_TEMPLATE(spinlock_t *lock) {
233      __ldv_spin_lock();
234  }

236  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_irq(?!save|save_nested)') Acquires the lock and checks for double spin lock*/
237  void ldv_spin_lock_irq_TEMPLATE(spinlock_t *lock) {
238      __ldv_spin_lock();
239  }

241  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock(?!_bh|_irq|_irqsave|_irqsave_nested)') Tries to acquire the lock and returns one if successful*/
242  int ldv_spin_trylock_TEMPLATE(spinlock_t *lock) {
243      __ldv_spin_trylock();
244  }

246  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_bh') Tries to acquire the lock and returns one if successful*/
247  int ldv_spin_trylock_bh_TEMPLATE(spinlock_t *lock) {
248      __ldv_spin_trylock();
249  }

251  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_irq(?!save|save_nested)') Tries to acquire the lock and returns one if successful*/
252  int ldv_spin_trylock_irq_TEMPLATE(spinlock_t *lock) {
253      __ldv_spin_trylock();
254  }

256  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock(?!_bh|_irq|_irqrestore)') Releases the lock and checks that lock was acquired before*/
257  void ldv_spin_unlock_TEMPLATE(spinlock_t *lock) {
258      __ldv_spin_unlock();
259  }

261  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_bh') Releases the lock and checks that lock was acquired before*/
262  void ldv_spin_unlock_bh_TEMPLATE(spinlock_t *lock) {
263      __ldv_spin_unlock();
264  }

266  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_irq(?!restore)') Releases the lock and checks that lock was acquired before*/
267  void ldv_spin_unlock_irq_TEMPLATE(spinlock_t *lock) {
268      __ldv_spin_unlock();
269  }

271  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_irqrestore') Releases the lock and checks that lock was acquired before*/
272  void ldv_spin_unlock_irqrestore_TEMPLATE(spinlock_t *lock, unsigned long flags) {
273      __ldv_spin_unlock();
274  }

276  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait') If some other process is holding the lock, wait until it is released*/
277  void ldv_spin_unlock_wait_TEMPLATE(spinlock_t *lock) {
278      /* LDV_COMMENT_ASSERT The spinlock must not be locked by the current process*/
279      ldv_assert(ldv_lock_TEMPLATE==1);
280  }

282  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked') Checks whether the lock is free or not*/
283  int ldv_spin_is_locked_TEMPLATE(spinlock_t *lock) {
284      __ldv_spin_checklocked(0,1);
285  }

287  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended')*/
288  int ldv_spin_is_contended_TEMPLATE(spinlock_t *lock) {
289      int is_lock_contended;
290      /* LDV_COMMENT_OTHER Construct an arbitrary flag*/
291      is_lock_contended = ldv_undef_int();
292      /* LDV_COMMENT_OTHER Choose arbitrary action*/
293      if(is_lock_contended)
294      {
295          /* LDV_COMMENT_RETURN The lock is contended*/
296          return 1;
297      }
298      else
299      {
300          /* LDV_COMMENT_RETURN The lock is not contended*/
301          return 0;
302      }
303  }

305  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock') Checks whether the lock is free or not*/
306  int ldv_spin_can_lock_TEMPLATE(spinlock_t *lock) {
307      __ldv_spin_checklocked(1,0);
308  }

310  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock') Locks on reaching reference count zero*/
311  int ldv_atomic_dec_and_lock_TEMPLATE(spinlock_t *lock, atomic_t *atomic) {
312      int is_atomic_counter_is_one;
313      /* LDV_COMMENT_OTHER Construct an arbitrary flag*/
314      is_atomic_counter_is_one = ldv_undef_int();
315      /* LDV_COMMENT_OTHER Choose arbitrary action*/
316      if(is_atomic_counter_is_one) {
317          /* LDV_COMMENT_RETURN Set counter to zero*/
318          atomic_dec(atomic);
319          /* LDV_COMMENT_RETURN Acquire the lock and return true*/
320          __ldv_spin_lock();
321          return 1;
322      } else {
323          /* LDV_COMMENT_RETURN Return false*/
324          return 0;
325      }
326  }

328  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that the spinlock is unlocked at the end*/
329  void ldv_check_final_state_TEMPLATE(void)
330  {
331      /* LDV_COMMENT_ASSERT The spinlock must be unlocked at the end*/
332      ldv_assert(ldv_lock_TEMPLATE == 1);
333  }

335  /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_initialize') Initialize spinlock variables*/
336  void ldv_initialize_TEMPLATE(void)
337  {
338      /* LDV_COMMENT_ASSERT Initialize spinlock with initial model value*/
339      ldv_lock_TEMPLATE = 1;
340  }
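The _TEMPLATE suffix in the model above is a placeholder: the toolchain instantiates one copy of the model for each spinlock name it finds in the driver, replacing TEMPLATE with that name. That is why the trace calls ldv_initialize_lock() and ldv_spin_lock_irqsave_lock() and tracks the variable ldv_lock_lock: both spinlock fields in this driver are named lock. For illustration only, a hypothetical lock named foo would get:

    /* Hypothetical instantiation for a spinlock field named "foo"
       (illustration; not part of this run). */
    int ldv_lock_foo = 1;

    void ldv_spin_lock_irqsave_foo(spinlock_t *lock, unsigned long flags) {
        ldv_assert(ldv_lock_foo == 1);
        ldv_lock_foo = 2;
    }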
File: drv_xordev.i (driver file drv_xordev.c with the generated environment model appended)

1    #include <linux/spinlock.h>
2    void ldv_spin_lock_irqsave(spinlock_t *lock, unsigned long flags);
3    void ldv_spin_lock_nested(spinlock_t *lock, int subclass);
4    void ldv_spin_lock_nest_lock(spinlock_t *lock, void *map);
5    void ldv_spin_lock_irqsave_nested(spinlock_t *lock, int subclass);
6    int ldv_spin_trylock_irqsave(spinlock_t *lock, unsigned long flags);
7    void ldv_spin_lock(spinlock_t *lock);
8    void ldv_spin_lock_bh(spinlock_t *lock);
9    void ldv_spin_lock_irq(spinlock_t *lock);
10   int ldv_spin_trylock(spinlock_t *lock);
11   int ldv_spin_trylock_bh(spinlock_t *lock);
12   int ldv_spin_trylock_irq(spinlock_t *lock);
13   void ldv_spin_unlock(spinlock_t *lock);
14   void ldv_spin_unlock_bh(spinlock_t *lock);
15   void ldv_spin_unlock_irq(spinlock_t *lock);
16   void ldv_spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
17   void ldv_spin_unlock_wait(spinlock_t *lock);
18   int ldv_spin_is_locked(spinlock_t *lock);
19   int ldv_spin_is_contended(spinlock_t *lock);
20   int ldv_spin_can_lock(spinlock_t *lock);
21   int ldv_atomic_dec_and_lock(spinlock_t *lock, atomic_t *atomic);
22   #define ldv_atomic_dec_and_lock_macro(atomic,lock) ldv_atomic_dec_and_lock(lock,atomic)

24   // Michal Marschall
25   // Index number: 291693

27   #include <linux/module.h>
28   #include <linux/kernel.h>
29   #include <linux/pci.h>
30   #include <linux/interrupt.h>
31   #include <linux/fs.h>
32   #include <linux/errno.h>
33   #include <linux/spinlock.h>
34   #include <linux/wait.h>
35   #include <linux/sched.h>
36   #include "circ_dma_buf.h"

38   MODULE_AUTHOR("Michal Marschall");
39   MODULE_LICENSE("GPL");

41   #define MAX_DEVICES 64

43   // types of character devices:
44   enum chardev_type {TYPE_SRC1, TYPE_SRC2, TYPE_DST};
45   const int TYPES = 3; // total number of types

47   #define XORDEV_MINOR(number, type) ((number) * TYPES + (type))
48   #define GET_NUMBER(minor) ((minor) / TYPES)
49   #define GET_TYPE(minor) ((minor) % TYPES)

51   // names used in various places:
52   #define NAME_CLASS "xordev_class"
53   #define NAME_PCI "xordev_pci"
54   #define NAME_CHAR "xordev_char"

56   // ids:
57   #define ID_VENDOR 0x1af4
58   #define ID_DEVICE 0x10fd

60   // xordev PCI registers:
61   static const int REG_INTR_EN = 0x00;
62   static const int REG_SRC1 = 0x04;
63   static const int REG_SRC2 = 0x08;
64   static const int REG_DST = 0x0c;
65   static const int REG_COUNT = 0x10;
66   static const int REG_INTR_COUNT = 0x14;

68   // values of REG_INTR_EN:
69   static const int IRQ_OFF = 0;
70   static const int IRQ_ON = 1;

72   struct xordev_device {
73       int present;
74       int number; // from 0 to MAX_DEVICES - 1
75       struct pci_dev *pcidev;
76       void __iomem *iomem;
77       int sent; // bytes sent to xor
78       wait_queue_head_t wait_queue;
79       spinlock_t lock;
80       struct char_device *src1;
81       struct char_device *src2;
82       struct char_device *dst;
83   };

85   struct char_device {
86       dev_t node;
87       struct xordev_device *xordev;
88       struct circ_dma_buf *buffer;
89       struct mutex mutex;
90       // for src* devices only:
91       int end;
92   };

94   static int xordev_major;
95   static struct class *xordev_class;
96   struct xordev_device devices[MAX_DEVICES];

98   static int cdb_copy_to_user(void *source, void *dest, int count) { // for argument to cdb_copy_*
99       return count - copy_to_user(dest, source, count);
100  }

102  static int cdb_copy_from_user(void *source, void *dest, int count) { // for argument to cdb_copy_*
103      return count - copy_from_user(dest, source, count);
104  }

106  static int update_buffer(struct char_device *chardev) {
107      struct circ_dma_buf *buffer = chardev->buffer;
108      unsigned long irq_flags;
109      int position, result;

111      ldv_spin_lock_irqsave(&buffer->lock, irq_flags);
112      position = chardev->xordev->sent - ioread32(chardev->xordev->iomem + REG_COUNT);
113      switch(GET_TYPE(chardev->node)) {
114          case TYPE_SRC1:
115          case TYPE_SRC2:
116              result = position - buffer->start;
117              if(result < 0)
118                  result += CDB_SIZE;
119              if(cdb_inc_begin(buffer, result) != result) // this should never happen
120                  result = -EIO;
121              break;
122          case TYPE_DST:
123              result = position - (buffer->start + buffer->fill);
124              if(result < 0)
125                  result += CDB_SIZE;
126              if(cdb_inc_end(buffer, result) != result) // this should never happen
127                  result = -EIO;
128              break;
129          default:
130              result = -EINVAL;
131      }
132      ldv_spin_unlock_irqrestore(&buffer->lock, irq_flags);

134      return result;
135  }

137  static void start_xoring(struct xordev_device *xordev) {
138      unsigned long irq_flags1, irq_flags2;
139      int ready, space, send_now;

141      ldv_spin_lock_irqsave(&xordev->lock, irq_flags1);
142      ldv_spin_lock_irqsave(&xordev->dst->buffer->lock, irq_flags2);
143      ready = min_t(int, xordev->src1->end, xordev->src2->end) - xordev->sent;
144      space = xordev->dst->buffer->start - xordev->sent;
145      if(space < 0 || (space == 0 && xordev->dst->buffer->fill == 0))
146          space = CDB_SIZE - xordev->sent;
147      send_now = min_t(int, ready, space);
148      if(send_now > 0) {
149          xordev->sent += send_now;
150          iowrite32(send_now, xordev->iomem + REG_COUNT);
151          iowrite32(IRQ_ON, xordev->iomem + REG_INTR_EN);
152          wake_up_interruptible(&xordev->wait_queue);
153      }
154      ldv_spin_unlock_irqrestore(&xordev->dst->buffer->lock, irq_flags2);
155      ldv_spin_unlock_irqrestore(&xordev->lock, irq_flags1);
156  }

158  static void irq_one_byte(struct xordev_device *xordev) {
159      int count = ioread32(xordev->iomem + REG_COUNT);
160      if(count > 0)
161          iowrite32(count - 1, xordev->iomem + REG_INTR_COUNT);
162  }

164  static ssize_t xordev_read(struct file *file, char *buffer, size_t count, loff_t *file_pos) {
165      struct char_device *chardev = (struct char_device*)file->private_data;
166      struct xordev_device *xordev = chardev->xordev;
167      ssize_t result;
168      int ret_value;

170      ret_value = update_buffer(chardev);
171      if(ret_value < 0)
172          return ret_value;
173      mutex_lock(&chardev->mutex);
174      while(chardev->buffer->fill == 0) {
175          irq_one_byte(xordev);
176          mutex_unlock(&chardev->mutex);
177          if(wait_event_interruptible(xordev->wait_queue, chardev->buffer->fill > 0) < 0)
178              return -ERESTARTSYS;
179          mutex_lock(&chardev->mutex);
180      }
181      result = cdb_copy_from(chardev->buffer, (void *)buffer, count, cdb_copy_to_user);
182      if(result == 0) { // this should never happen
183          mutex_unlock(&chardev->mutex);
184          return -EIO;
185      }
186      *file_pos = file->f_pos + result;
187      mutex_unlock(&chardev->mutex);
188      start_xoring(xordev);

190      return result;
191  }

193  static ssize_t xordev_write(struct file *file, const char *buffer, size_t count, loff_t *file_pos) {
194      struct char_device *chardev = (struct char_device*)file->private_data;
195      struct xordev_device *xordev = chardev->xordev;
196      ssize_t result;
197      int ret_value;
198      unsigned long irq_flags;

200      ret_value = update_buffer(chardev);
201      if(ret_value < 0)
202          return ret_value;
203      mutex_lock(&chardev->mutex);
204      while(chardev->buffer->fill == CDB_SIZE) {
205          irq_one_byte(xordev);
206          mutex_unlock(&chardev->mutex);
207          if(wait_event_interruptible(xordev->wait_queue, chardev->buffer->fill < CDB_SIZE) < 0)
208              return -ERESTARTSYS;
209          mutex_lock(&chardev->mutex);
210      }
211      result = cdb_copy_to((void *)buffer, chardev->buffer, count, cdb_copy_from_user);
212      if(result == 0) { // this should never happen
213          mutex_unlock(&chardev->mutex);
214          return -ENOSPC;
215      }
216      *file_pos = file->f_pos + result;
217      mutex_unlock(&chardev->mutex);
218      ldv_spin_lock_irqsave(&xordev->lock, irq_flags);
219      chardev->end += result;
220      ldv_spin_unlock_irqrestore(&xordev->lock, irq_flags);
221      start_xoring(xordev);

223      return result;
224  }

226  static struct file_operations xordev_dst_fops;
227  static struct file_operations xordev_input_fops; // definitions below not visible

229  static int xordev_open(struct inode *inode, struct file *file) {
230      int minor = MINOR(inode->i_rdev);
231      struct xordev_device *xordev = &devices[GET_NUMBER(minor)];
232      struct char_device *chardev;
233      struct file_operations *file_ops;

235      switch(GET_TYPE(minor)) {
236          case TYPE_SRC1:
237              file_ops = &xordev_input_fops;
238              chardev = xordev->src1;
239              break;
240          case TYPE_SRC2:
241              file_ops = &xordev_input_fops;
242              chardev = xordev->src2;
243              break;
244          case TYPE_DST:
245              file_ops = &xordev_dst_fops;
246              chardev = xordev->dst;
247              break;
248          default: // this should never happen
249              return -EINVAL;
250      }

252      file->f_op = file_ops;
253      file->private_data = (void *)chardev;

255      return 0;
256  }

258  static int xordev_release(struct inode *inode, struct file *file) {
259      return 0;
260  }

262  static struct file_operations xordev_fops = {
263      owner: THIS_MODULE,
264      open: xordev_open,
265      release: xordev_release
266  };

268  static struct file_operations xordev_dst_fops = {
269      owner: THIS_MODULE,
270      read: xordev_read,
271      open: xordev_open,
272      release: xordev_release
273  };

275  static struct file_operations xordev_input_fops = {
276      owner: THIS_MODULE,
277      write: xordev_write,
278      open: xordev_open,
279      release: xordev_release
280  };

282  static struct char_device *chardev_alloc(struct xordev_device *xordev, int type) {
283      struct char_device *chardev = kmalloc(sizeof(struct char_device), GFP_KERNEL);
284      if(chardev == NULL)
285          return NULL;
286      chardev->node = MKDEV(xordev_major, XORDEV_MINOR(xordev->number, type));
287      chardev->xordev = xordev;
288      chardev->buffer = cdb_alloc(&xordev->pcidev->dev);
289      if(chardev->buffer == NULL) {
290          kfree(chardev); // avoid memory leak
291          return NULL;
292      }
293      mutex_init(&chardev->mutex);
294      chardev->end = 0;
295      return chardev;
296  }

298  static void chardev_free(struct char_device *chardev) {
299      cdb_free(chardev->buffer);
300      kfree(chardev);
301  }

303  static void init_reg_values(struct xordev_device *xordev) {
304      iowrite32(0, xordev->iomem + REG_INTR_COUNT);
305      iowrite32(xordev->src1->buffer->handle, xordev->iomem + REG_SRC1);
306      iowrite32(xordev->src2->buffer->handle, xordev->iomem + REG_SRC2);
307      iowrite32(xordev->dst->buffer->handle, xordev->iomem + REG_DST);
308  }

310  static irqreturn_t xordev_irq_handler(int irq, void *device) {
311      struct xordev_device *xordev = (struct xordev_device *)pci_get_drvdata((struct pci_dev *)device);
312      int count, intr_count;
313      unsigned long irq_flags;

315      ldv_spin_lock_irqsave(&xordev->lock, irq_flags);
316      if(ioread32(xordev->iomem + REG_INTR_EN) != IRQ_ON) {
317          ldv_spin_unlock_irqrestore(&xordev->lock, irq_flags);
318          return IRQ_NONE;
319      }
320      count = ioread32(xordev->iomem + REG_COUNT);
321      intr_count = ioread32(xordev->iomem + REG_INTR_COUNT);
322      if(count > intr_count) {
323          ldv_spin_unlock_irqrestore(&xordev->lock, irq_flags);
324          return IRQ_NONE;
325      }

327      // handle interrupt:
328      update_buffer(xordev->src1);
329      update_buffer(xordev->src2);
330      update_buffer(xordev->dst);
331      if(count == 0) {
332          iowrite32(IRQ_OFF, xordev->iomem + REG_INTR_EN);
333          if(xordev->sent == CDB_SIZE) {
334              xordev->sent -= CDB_SIZE;
335              xordev->src1->end -= CDB_SIZE;
336              xordev->src2->end -= CDB_SIZE;
337              init_reg_values(xordev);
338          }
339      }
340      if(intr_count != 0)
341          iowrite32(0, xordev->iomem + REG_INTR_COUNT);
342      wake_up_interruptible(&xordev->wait_queue);
343      ldv_spin_unlock_irqrestore(&xordev->lock, irq_flags);
344      start_xoring(xordev);

346      return IRQ_HANDLED;
347  }

349  static int xordev_probe(struct pci_dev *pcidev, const struct pci_device_id *id) {
350      int i, ret_value;
351      struct xordev_device *xordev;
352      struct device *node;

354      // look for first not used device:
355      for(i = 0; i < MAX_DEVICES; ++i)
356          if(!devices[i].present)
357              break;
358      if(i == MAX_DEVICES)
359          return -ENOMEM;
360      xordev = &devices[i];

362      // enable device, map region:
363      xordev->pcidev = pcidev;
364      ret_value = pci_enable_device(pcidev);
365      if(ret_value < 0)
366          return ret_value;
367      ret_value = pci_request_regions(pcidev, NAME_PCI);
368      if(ret_value < 0)
369          return ret_value;
370      xordev->iomem = pci_iomap(pcidev, 0, 0);
371      if(xordev->iomem == NULL)
372          return -EIO;

374      // enable DMA:
375      pci_set_master(pcidev);
376      ret_value = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
377      if(ret_value < 0)
378          return ret_value;
379      ret_value = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
380      if(ret_value < 0)
381          return ret_value;

383      // allocate other resources:
384      xordev->src1 = chardev_alloc(xordev, TYPE_SRC1);
385      if(xordev->src1 == NULL)
386          return -ENOMEM;
387      xordev->src2 = chardev_alloc(xordev, TYPE_SRC2);
388      if(xordev->src2 == NULL)
389          return -ENOMEM;
390      xordev->dst = chardev_alloc(xordev, TYPE_DST);
391      if(xordev->dst == NULL)
392          return -ENOMEM;
393      init_waitqueue_head(&xordev->wait_queue);

395      // set initial values:
396      spin_lock_init(&xordev->lock);
397      xordev->sent = 0;
398      pci_set_drvdata(pcidev, (void *)xordev);
399      init_reg_values(xordev);
400      iowrite32(IRQ_OFF, xordev->iomem + REG_INTR_EN);

402      // request interrupts:
403      ret_value = request_irq(pcidev->irq, xordev_irq_handler, IRQF_SHARED, NAME_PCI, (void *)pcidev);
404      if(ret_value < 0)
405          return ret_value;

407      // create nodes in /dev:
408      node = device_create(xordev_class, NULL, xordev->src1->node, NULL, "xor%ds1", xordev->number);
409      if(IS_ERR(node))
410          return -EIO;
411      node = device_create(xordev_class, NULL, xordev->src2->node, NULL, "xor%ds2", xordev->number);
412      if(IS_ERR(node))
413          return -EIO;
414      node = device_create(xordev_class, NULL, xordev->dst->node, NULL, "xor%dd", xordev->number);
415      if(IS_ERR(node))
416          return -EIO;

418      xordev->present = 1;

420      return 0;
421  }

423  static void xordev_remove(struct pci_dev *pcidev) {
424      struct xordev_device *xordev = (struct xordev_device *)pci_get_drvdata(pcidev);

426      xordev->present = false;

428      // remove nodes from /dev:
429      device_destroy(xordev_class, xordev->src1->node);
430      device_destroy(xordev_class, xordev->src2->node);
431      device_destroy(xordev_class, xordev->dst->node);

433      // release resources, disable device:
434      free_irq(pcidev->irq, (void *)pcidev);
435      pci_iounmap(pcidev, xordev->iomem);
436      pci_release_regions(pcidev);
437      pci_disable_device(pcidev);
438      chardev_free(xordev->src1);
439      chardev_free(xordev->src2);
440      chardev_free(xordev->dst);
441  }

443  static struct pci_device_id xordev_id = {
444      PCI_DEVICE(ID_VENDOR, ID_DEVICE)
445  };

447  static struct pci_driver xordev_driver = {
448      name: NAME_PCI,
449      id_table: &xordev_id,
450      probe: xordev_probe,
451      remove: xordev_remove,
452  };

454  static int xordev_init_module(void) {
455      int i, ret_value;

457      // initialize array devices:
458      for(i = 0; i < MAX_DEVICES; ++i) {
459          devices[i].present = 0;
460          devices[i].number = i;
461      }

463      // register devices:
464      xordev_major = register_chrdev(0, NAME_CHAR, &xordev_fops);
465      if(xordev_major < 0) {
466          ret_value = xordev_major;
467          goto err_register_chrdev;
468      }
469      xordev_class = class_create(THIS_MODULE, NAME_CLASS);
470      if(IS_ERR(xordev_class)) {
471          ret_value = -EIO;
472          goto err_class_create;
473      }
474      ret_value = pci_register_driver(&xordev_driver);
475      if(ret_value < 0)
476          goto err_register_driver;

478      return 0;

480  err_register_driver:
481      pci_unregister_driver(&xordev_driver);
482  err_class_create:
483      class_destroy(xordev_class);
484  err_register_chrdev:
485      unregister_chrdev(xordev_major, NAME_CHAR);
486      return ret_value;
487  }

489  static void xordev_cleanup_module(void) {
490      pci_unregister_driver(&xordev_driver);
491      class_destroy(xordev_class);
492      unregister_chrdev(xordev_major, NAME_CHAR);
493  }

495  module_init(xordev_init_module);
496  module_exit(xordev_cleanup_module);

502  /* LDV_COMMENT_BEGIN_MAIN */
503  #ifdef LDV_MAIN1_sequence_infinite_withcheck_stateful

505  /*###########################################################################*/

507  /*############## Driver Environment Generator 0.2 output ####################*/

509  /*###########################################################################*/

513  /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests whether all kernel resources are correctly released by the driver before it is unloaded. */
514  void ldv_check_final_state(void);

516  /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that the return result is correct. */
517  void ldv_check_return_value(int res);

519  /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
520  void ldv_initialize(void);

522  /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
523  int nondet_int(void);

525  /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
526  int LDV_IN_INTERRUPT;

528  /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
529  void ldv_main1_sequence_infinite_withcheck_stateful(void) {

533      /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
534      /*============================= VARIABLE DECLARATION PART =============================*/
535      /** CALLBACK SECTION request_irq **/
536      /* content: static irqreturn_t xordev_irq_handler(int irq, void *device)*/
537      /* LDV_COMMENT_BEGIN_PREP */
538      #define MAX_DEVICES 64
539      #define XORDEV_MINOR(number, type) ((number) * TYPES + (type))
540      #define GET_NUMBER(minor) ((minor) / TYPES)
541      #define GET_TYPE(minor) ((minor) % TYPES)
542      #define NAME_CLASS "xordev_class"
543      #define NAME_PCI "xordev_pci"
544      #define NAME_CHAR "xordev_char"
545      #define ID_VENDOR 0x1af4
546      #define ID_DEVICE 0x10fd
547      /* LDV_COMMENT_END_PREP */
548      /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "xordev_irq_handler" */
549      int var_xordev_irq_handler_12_p0;
550      /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "xordev_irq_handler" */
551      void * var_xordev_irq_handler_12_p1;

556      /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
557      /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
558      /*============================= VARIABLE INITIALIZING PART =============================*/
559      LDV_IN_INTERRUPT=1;

564      /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
565      /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
566      /*============================= FUNCTION CALL SECTION =============================*/
567      /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
568      ldv_initialize();

570      /** INIT: init_type: ST_MODULE_INIT **/
571      /* content: static int xordev_init_module(void)*/
572      /* LDV_COMMENT_BEGIN_PREP */
573      #define MAX_DEVICES 64
574      #define XORDEV_MINOR(number, type) ((number) * TYPES + (type))
575      #define GET_NUMBER(minor) ((minor) / TYPES)
576      #define GET_TYPE(minor) ((minor) % TYPES)
577      #define NAME_CLASS "xordev_class"
578      #define NAME_PCI "xordev_pci"
579      #define NAME_CHAR "xordev_char"
580      #define ID_VENDOR 0x1af4
581      #define ID_DEVICE 0x10fd
582      /* LDV_COMMENT_END_PREP */
583      /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver init function after loading the driver. This function is declared with "MODULE_INIT(function name)". */
584      if(xordev_init_module())
585          goto ldv_final;

589      while(nondet_int()) {

592          switch(nondet_int()) {

594          case 0: {

596              /** CALLBACK SECTION request_irq **/
597              LDV_IN_INTERRUPT=2;

599              /* content: static irqreturn_t xordev_irq_handler(int irq, void *device)*/
600              /* LDV_COMMENT_BEGIN_PREP */
601              #define MAX_DEVICES 64
602              #define XORDEV_MINOR(number, type) ((number) * TYPES + (type))
603              #define GET_NUMBER(minor) ((minor) / TYPES)
604              #define GET_TYPE(minor) ((minor) % TYPES)
605              #define NAME_CLASS "xordev_class"
606              #define NAME_PCI "xordev_pci"
607              #define NAME_CHAR "xordev_char"
608              #define ID_VENDOR 0x1af4
609              #define ID_DEVICE 0x10fd
610              /* LDV_COMMENT_END_PREP */
611              /* LDV_COMMENT_FUNCTION_CALL */
612              xordev_irq_handler( var_xordev_irq_handler_12_p0, var_xordev_irq_handler_12_p1);
613              LDV_IN_INTERRUPT=1;

617          }

619          break;
620          default: break;

622          }

624      }

626  ldv_module_exit:

628      /** INIT: init_type: ST_MODULE_EXIT **/
629      /* content: static void xordev_cleanup_module(void)*/
630      /* LDV_COMMENT_BEGIN_PREP */
631      #define MAX_DEVICES 64
632      #define XORDEV_MINOR(number, type) ((number) * TYPES + (type))
633      #define GET_NUMBER(minor) ((minor) / TYPES)
634      #define GET_TYPE(minor) ((minor) % TYPES)
635      #define NAME_CLASS "xordev_class"
636      #define NAME_PCI "xordev_pci"
637      #define NAME_CHAR "xordev_char"
638      #define ID_VENDOR 0x1af4
639      #define ID_DEVICE 0x10fd
640      /* LDV_COMMENT_END_PREP */
641      /* LDV_COMMENT_FUNCTION_CALL The kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared with "MODULE_EXIT(function name)". */
642      xordev_cleanup_module();

644      /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
645  ldv_final: ldv_check_final_state();

647      /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
648      return;

650  }
651  #endif

653  /* LDV_COMMENT_END_MAIN */
File: include/linux/err.h

1    #ifndef _LINUX_ERR_H
2    #define _LINUX_ERR_H

4    #include <linux/compiler.h>

6    #include <asm/errno.h>

8    /*
9     * Kernel pointers have redundant information, so we can use a
10    * scheme where we can return either an error code or a dentry
11    * pointer with the same return value.
12    *
13    * This should be a per-architecture thing, to allow different
14    * error and pointer decisions.
15    */
16   #define MAX_ERRNO 4095

18   #ifndef __ASSEMBLY__

20   #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

22   static inline void * __must_check ERR_PTR(long error)
23   {
24       return (void *) error;
25   }

27   static inline long __must_check PTR_ERR(const void *ptr)
28   {
29       return (long) ptr;
30   }

32   static inline long __must_check IS_ERR(const void *ptr)
33   {
34       return IS_ERR_VALUE((unsigned long)ptr);
35   }

37   static inline long __must_check IS_ERR_OR_NULL(const void *ptr)
38   {
39       return !ptr || IS_ERR_VALUE((unsigned long)ptr);
40   }

42   /**
43    * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
44    * @ptr: The pointer to cast.
45    *
46    * Explicitly cast an error-valued pointer to another pointer type in such a
47    * way as to make it clear that's what's going on.
48    */
49   static inline void * __must_check ERR_CAST(const void *ptr)
50   {
51       /* cast away the const */
52       return (void *) ptr;
53   }

55   #endif

57   #endif /* _LINUX_ERR_H */
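This header explains the IS_ERR() steps in the trace: IS_ERR_VALUE(x) tests (x) >= (unsigned long)-MAX_ERRNO with MAX_ERRNO = 4095, i.e. a pointer encodes an error code iff it falls within the last 4095 values of the address space. The trace step assert(ptr < -4095) is BLAST's rendering of the branch where that test is false. A worked instantiation for the call site in xordev_init_module():

    /* IS_ERR(xordev_class)
     *   = IS_ERR_VALUE((unsigned long)xordev_class)
     *   = ((unsigned long)xordev_class >= (unsigned long)-4095)
     * On the reported path this comparison is assumed false (xordev_class is a
     * valid, non-error pointer), so __cil_tmp3 = 0; __builtin_expect(), which
     * the model defines to return its first argument (model line 136), passes
     * the 0 through, and the trace's assert(tmp___8 == 0) holds. */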
1 #ifndef _LINUX_FS_H 2 #define _LINUX_FS_H 3 4 /* 5 * This file has definitions for some important file table 6 * structures etc. 7 */ 8 9 #include <linux/limits.h> 10 #include <linux/ioctl.h> 11 #include <linux/blk_types.h> 12 #include <linux/types.h> 13 14 /* 15 * It's silly to have NR_OPEN bigger than NR_FILE, but you can change 16 * the file limit at runtime and only root can increase the per-process 17 * nr_file rlimit, so it's safe to set up a ridiculously high absolute 18 * upper limit on files-per-process. 19 * 20 * Some programs (notably those using select()) may have to be 21 * recompiled to take full advantage of the new limits.. 22 */ 23 24 /* Fixed constants first: */ 25 #undef NR_OPEN 26 #define INR_OPEN 1024 /* Initial setting for nfile rlimits */ 27 28 #define BLOCK_SIZE_BITS 10 29 #define BLOCK_SIZE (1<<BLOCK_SIZE_BITS) 30 31 #define SEEK_SET 0 /* seek relative to beginning of file */ 32 #define SEEK_CUR 1 /* seek relative to current file position */ 33 #define SEEK_END 2 /* seek relative to end of file */ 34 #define SEEK_MAX SEEK_END 35 36 struct fstrim_range { 37 __u64 start; 38 __u64 len; 39 __u64 minlen; 40 }; 41 42 /* And dynamically-tunable limits and defaults: */ 43 struct files_stat_struct { 44 unsigned long nr_files; /* read only */ 45 unsigned long nr_free_files; /* read only */ 46 unsigned long max_files; /* tunable */ 47 }; 48 49 struct inodes_stat_t { 50 int nr_inodes; 51 int nr_unused; 52 int dummy[5]; /* padding for sysctl ABI compatibility */ 53 }; 54 55 56 #define NR_FILE 8192 /* this can well be larger on a larger system */ 57 58 #define MAY_EXEC 1 59 #define MAY_WRITE 2 60 #define MAY_READ 4 61 #define MAY_APPEND 8 62 #define MAY_ACCESS 16 63 #define MAY_OPEN 32 64 #define MAY_CHDIR 64 65 66 /* 67 * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond 68 * to O_WRONLY and O_RDWR via the strange trick in __dentry_open() 69 */ 70 71 /* file is open for reading */ 72 #define FMODE_READ ((__force fmode_t)0x1) 73 /* file is open for writing */ 74 #define FMODE_WRITE ((__force fmode_t)0x2) 75 /* file is seekable */ 76 #define FMODE_LSEEK ((__force fmode_t)0x4) 77 /* file can be accessed using pread */ 78 #define FMODE_PREAD ((__force fmode_t)0x8) 79 /* file can be accessed using pwrite */ 80 #define FMODE_PWRITE ((__force fmode_t)0x10) 81 /* File is opened for execution with sys_execve / sys_uselib */ 82 #define FMODE_EXEC ((__force fmode_t)0x20) 83 /* File is opened with O_NDELAY (only set for block devices) */ 84 #define FMODE_NDELAY ((__force fmode_t)0x40) 85 /* File is opened with O_EXCL (only set for block devices) */ 86 #define FMODE_EXCL ((__force fmode_t)0x80) 87 /* File is opened using open(.., 3, ..) and is writeable only for ioctls 88 (specialy hack for floppy.c) */ 89 #define FMODE_WRITE_IOCTL ((__force fmode_t)0x100) 90 91 /* 92 * Don't update ctime and mtime. 93 * 94 * Currently a special hack for the XFS open_by_handle ioctl, but we'll 95 * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon. 96 */ 97 #define FMODE_NOCMTIME ((__force fmode_t)0x800) 98 99 /* Expect random access pattern */ 100 #define FMODE_RANDOM ((__force fmode_t)0x1000) 101 102 /* File is huge (eg. /dev/kmem): treat loff_t as unsigned */ 103 #define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000) 104 105 /* File was opened by fanotify and shouldn't generate fanotify events */ 106 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) 107 108 /* 109 * The below are the various read and write types that we support. 
Some of 110 * them include behavioral modifiers that send information down to the 111 * block layer and IO scheduler. Terminology: 112 * 113 * The block layer uses device plugging to defer IO a little bit, in 114 * the hope that we will see more IO very shortly. This increases 115 * coalescing of adjacent IO and thus reduces the number of IOs we 116 * have to send to the device. It also allows for better queuing, 117 * if the IO isn't mergeable. If the caller is going to be waiting 118 * for the IO, then he must ensure that the device is unplugged so 119 * that the IO is dispatched to the driver. 120 * 121 * All IO is handled async in Linux. This is fine for background 122 * writes, but for reads or writes that someone waits for completion 123 * on, we want to notify the block layer and IO scheduler so that they 124 * know about it. That allows them to make better scheduling 125 * decisions. So when the below references 'sync' and 'async', it 126 * is referencing this priority hint. 127 * 128 * With that in mind, the available types are: 129 * 130 * READ A normal read operation. Device will be plugged. 131 * READ_SYNC A synchronous read. Device is not plugged, caller can 132 * immediately wait on this read without caring about 133 * unplugging. 134 * READA Used for read-ahead operations. Lower priority, and the 135 * block layer could (in theory) choose to ignore this 136 * request if it runs into resource problems. 137 * WRITE A normal async write. Device will be plugged. 138 * WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down 139 * the hint that someone will be waiting on this IO 140 * shortly. The device must still be unplugged explicitly, 141 * WRITE_SYNC_PLUG does not do this as we could be 142 * submitting more writes before we actually wait on any 143 * of them. 144 * WRITE_SYNC Like WRITE_SYNC_PLUG, but also unplugs the device 145 * immediately after submission. The write equivalent 146 * of READ_SYNC. 147 * WRITE_ODIRECT_PLUG Special case write for O_DIRECT only. 148 * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush. 149 * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on 150 * non-volatile media on completion. 151 * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded 152 * by a cache flush and data is guaranteed to be on 153 * non-volatile media on completion. 154 * 155 */ 156 #define RW_MASK REQ_WRITE 157 #define RWA_MASK REQ_RAHEAD 158 159 #define READ 0 160 #define WRITE RW_MASK 161 #define READA RWA_MASK 162 163 #define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG) 164 #define READ_META (READ | REQ_META) 165 #define WRITE_SYNC_PLUG (WRITE | REQ_SYNC | REQ_NOIDLE) 166 #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) 167 #define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC) 168 #define WRITE_META (WRITE | REQ_META) 169 #define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ 170 REQ_FLUSH) 171 #define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ 172 REQ_FUA) 173 #define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ 174 REQ_FLUSH | REQ_FUA) 175 176 #define SEL_IN 1 177 #define SEL_OUT 2 178 #define SEL_EX 4 179 180 /* public flags for file_system_type */ 181 #define FS_REQUIRES_DEV 1 182 #define FS_BINARY_MOUNTDATA 2 183 #define FS_HAS_SUBTYPE 4 184 #define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */ 185 #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() 186 * during rename() internally. 
#define SEL_IN		1
#define SEL_OUT		2
#define SEL_EX		4

/* public flags for file_system_type */
#define FS_REQUIRES_DEV 1
#define FS_BINARY_MOUNTDATA 2
#define FS_HAS_SUBTYPE 4
#define FS_REVAL_DOT	16384	/* Check the paths ".", ".." for staleness */
#define FS_RENAME_DOES_D_MOVE	32768	/* FS will handle d_move()
					 * during rename() internally.
					 */

/*
 * These are the fs-independent mount-flags: up to 32 flags are supported
 */
#define MS_RDONLY	 1	/* Mount read-only */
#define MS_NOSUID	 2	/* Ignore suid and sgid bits */
#define MS_NODEV	 4	/* Disallow access to device special files */
#define MS_NOEXEC	 8	/* Disallow program execution */
#define MS_SYNCHRONOUS	16	/* Writes are synced at once */
#define MS_REMOUNT	32	/* Alter flags of a mounted FS */
#define MS_MANDLOCK	64	/* Allow mandatory locks on an FS */
#define MS_DIRSYNC	128	/* Directory modifications are synchronous */
#define MS_NOATIME	1024	/* Do not update access times. */
#define MS_NODIRATIME	2048	/* Do not update directory access times */
#define MS_BIND		4096
#define MS_MOVE		8192
#define MS_REC		16384
#define MS_VERBOSE	32768	/* War is peace. Verbosity is silence.
				   MS_VERBOSE is deprecated. */
#define MS_SILENT	32768
#define MS_POSIXACL	(1<<16)	/* VFS does not apply the umask */
#define MS_UNBINDABLE	(1<<17)	/* change to unbindable */
#define MS_PRIVATE	(1<<18)	/* change to private */
#define MS_SLAVE	(1<<19)	/* change to slave */
#define MS_SHARED	(1<<20)	/* change to shared */
#define MS_RELATIME	(1<<21)	/* Update atime relative to mtime/ctime. */
#define MS_KERNMOUNT	(1<<22)	/* this is a kern_mount call */
#define MS_I_VERSION	(1<<23)	/* Update inode I_version field */
#define MS_STRICTATIME	(1<<24)	/* Always perform atime updates */
#define MS_BORN		(1<<29)
#define MS_ACTIVE	(1<<30)
#define MS_NOUSER	(1<<31)

/*
 * Superblock flags that can be altered by MS_REMOUNT
 */
#define MS_RMT_MASK	(MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION)

/*
 * Old magic mount flag and mask
 */
#define MS_MGC_VAL 0xC0ED0000
#define MS_MGC_MSK 0xffff0000

/* Inode flags - they have nothing to do with superblock flags now */

#define S_SYNC		1	/* Writes are synced at once */
#define S_NOATIME	2	/* Do not update access times */
#define S_APPEND	4	/* Append-only file */
#define S_IMMUTABLE	8	/* Immutable file */
#define S_DEAD		16	/* removed, but still open directory */
#define S_NOQUOTA	32	/* Inode is not counted to quota */
#define S_DIRSYNC	64	/* Directory modifications are synchronous */
#define S_NOCMTIME	128	/* Do not update file c/mtime */
#define S_SWAPFILE	256	/* Do not truncate: swapon got its bmaps */
#define S_PRIVATE	512	/* Inode is fs-internal */
#define S_IMA		1024	/* Inode has an associated IMA struct */
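/*
 * Editor's illustration (not part of fs.h): the MS_* flags above are the
 * values userspace passes to mount(2). A minimal userspace sketch,
 * assuming <sys/mount.h> and a hypothetical mount point; error handling
 * elided.
 */
int remount_ro(const char *target)
{
	/* source and fstype may be NULL for a remount */
	return mount(NULL, target, NULL, MS_REMOUNT | MS_RDONLY, NULL);
}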
/*
 * Note that nosuid etc flags are inode-specific: setting some file-system
 * flags just means all the inodes inherit those flags by default. It might be
 * possible to override it selectively if you really wanted to with some
 * ioctl() that is not currently implemented.
 *
 * Exception: MS_RDONLY is always applied to the entire file system.
 *
 * Unfortunately, it is possible to change a filesystem's flags while it is
 * mounted and has files in use. This means that all of the inodes will not
 * have their i_flags updated. Hence, i_flags no longer inherit the superblock
 * mount flags, so these have to be checked separately. -- rmk@arm.uk.linux.org
 */
#define __IS_FLG(inode,flg) ((inode)->i_sb->s_flags & (flg))

#define IS_RDONLY(inode)	((inode)->i_sb->s_flags & MS_RDONLY)
#define IS_SYNC(inode)		(__IS_FLG(inode, MS_SYNCHRONOUS) || \
				 ((inode)->i_flags & S_SYNC))
#define IS_DIRSYNC(inode)	(__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
				 ((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
#define IS_MANDLOCK(inode)	__IS_FLG(inode, MS_MANDLOCK)
#define IS_NOATIME(inode)	__IS_FLG(inode, MS_RDONLY|MS_NOATIME)
#define IS_I_VERSION(inode)	__IS_FLG(inode, MS_I_VERSION)

#define IS_NOQUOTA(inode)	((inode)->i_flags & S_NOQUOTA)
#define IS_APPEND(inode)	((inode)->i_flags & S_APPEND)
#define IS_IMMUTABLE(inode)	((inode)->i_flags & S_IMMUTABLE)
#define IS_POSIXACL(inode)	__IS_FLG(inode, MS_POSIXACL)

#define IS_DEADDIR(inode)	((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode)	((inode)->i_flags & S_NOCMTIME)
#define IS_SWAPFILE(inode)	((inode)->i_flags & S_SWAPFILE)
#define IS_PRIVATE(inode)	((inode)->i_flags & S_PRIVATE)
#define IS_IMA(inode)		((inode)->i_flags & S_IMA)

/* the read-only stuff doesn't really belong here, but any other place is
   probably as bad and I don't want to create yet another include file. */

#define BLKROSET   _IO(0x12,93)	/* set device read-only (0 = read-write) */
#define BLKROGET   _IO(0x12,94)	/* get read-only status (0 = read_write) */
#define BLKRRPART  _IO(0x12,95)	/* re-read partition table */
#define BLKGETSIZE _IO(0x12,96)	/* return device size /512 (long *arg) */
#define BLKFLSBUF  _IO(0x12,97)	/* flush buffer cache */
#define BLKRASET   _IO(0x12,98)	/* set read ahead for block device */
#define BLKRAGET   _IO(0x12,99)	/* get current read ahead setting */
#define BLKFRASET  _IO(0x12,100) /* set filesystem (mm/filemap.c) read-ahead */
#define BLKFRAGET  _IO(0x12,101) /* get filesystem (mm/filemap.c) read-ahead */
#define BLKSECTSET _IO(0x12,102) /* set max sectors per request (ll_rw_blk.c) */
#define BLKSECTGET _IO(0x12,103) /* get max sectors per request (ll_rw_blk.c) */
#define BLKSSZGET  _IO(0x12,104) /* get block device sector size */
#if 0
#define BLKPG      _IO(0x12,105) /* See blkpg.h */

/* Some people are morons.  Do not use sizeof! */

#define BLKELVGET  _IOR(0x12,106,size_t)	/* elevator get */
#define BLKELVSET  _IOW(0x12,107,size_t)	/* elevator set */
/* This was here just to show that the number is taken -
   probably all these _IO(0x12,*) ioctls should be moved to blkpg.h. */
#endif
/* A jump here: 108-111 have been used for various private purposes. */
#define BLKBSZGET  _IOR(0x12,112,size_t)
#define BLKBSZSET  _IOW(0x12,113,size_t)
#define BLKGETSIZE64 _IOR(0x12,114,size_t)	/* return device size in bytes (u64 *arg) */
#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup)
#define BLKTRACESTART _IO(0x12,116)
#define BLKTRACESTOP _IO(0x12,117)
#define BLKTRACETEARDOWN _IO(0x12,118)
#define BLKDISCARD _IO(0x12,119)
#define BLKIOMIN _IO(0x12,120)
#define BLKIOOPT _IO(0x12,121)
#define BLKALIGNOFF _IO(0x12,122)
#define BLKPBSZGET _IO(0x12,123)
#define BLKDISCARDZEROES _IO(0x12,124)
#define BLKSECDISCARD _IO(0x12,125)

#define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
#define FIBMAP	   _IO(0x00,1)	/* bmap access */
#define FIGETBSZ   _IO(0x00,2)	/* get the block size used for bmap */
#define FIFREEZE	_IOWR('X', 119, int)	/* Freeze */
#define FITHAW		_IOWR('X', 120, int)	/* Thaw */
#define FITRIM		_IOWR('X', 121, struct fstrim_range)	/* Trim */

#define	FS_IOC_GETFLAGS			_IOR('f', 1, long)
#define	FS_IOC_SETFLAGS			_IOW('f', 2, long)
#define	FS_IOC_GETVERSION		_IOR('v', 1, long)
#define	FS_IOC_SETVERSION		_IOW('v', 2, long)
#define FS_IOC_FIEMAP			_IOWR('f', 11, struct fiemap)
#define FS_IOC32_GETFLAGS		_IOR('f', 1, int)
#define FS_IOC32_SETFLAGS		_IOW('f', 2, int)
#define FS_IOC32_GETVERSION		_IOR('v', 1, int)
#define FS_IOC32_SETVERSION		_IOW('v', 2, int)
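/*
 * Editor's illustration (not part of fs.h): a userspace sketch of the
 * BLKGETSIZE64 ioctl above, which fills in the device size in bytes
 * through a u64 pointer. Assumes <sys/ioctl.h>, <linux/fs.h> and
 * <stdint.h>; fd is a hypothetical open block device, errors elided.
 */
uint64_t device_size(int fd)
{
	uint64_t bytes = 0;

	ioctl(fd, BLKGETSIZE64, &bytes);
	return bytes;
}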
/*
 * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
 */
#define	FS_SECRM_FL			0x00000001 /* Secure deletion */
#define	FS_UNRM_FL			0x00000002 /* Undelete */
#define	FS_COMPR_FL			0x00000004 /* Compress file */
#define FS_SYNC_FL			0x00000008 /* Synchronous updates */
#define FS_IMMUTABLE_FL			0x00000010 /* Immutable file */
#define FS_APPEND_FL			0x00000020 /* writes to file may only append */
#define FS_NODUMP_FL			0x00000040 /* do not dump file */
#define FS_NOATIME_FL			0x00000080 /* do not update atime */
/* Reserved for compression usage... */
#define FS_DIRTY_FL			0x00000100
#define FS_COMPRBLK_FL			0x00000200 /* One or more compressed clusters */
#define FS_NOCOMP_FL			0x00000400 /* Don't compress */
#define FS_ECOMPR_FL			0x00000800 /* Compression error */
/* End compression flags --- maybe not all used */
#define FS_BTREE_FL			0x00001000 /* btree format dir */
#define FS_INDEX_FL			0x00001000 /* hash-indexed directory */
#define FS_IMAGIC_FL			0x00002000 /* AFS directory */
#define FS_JOURNAL_DATA_FL		0x00004000 /* Reserved for ext3 */
#define FS_NOTAIL_FL			0x00008000 /* file tail should not be merged */
#define FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
#define FS_TOPDIR_FL			0x00020000 /* Top of directory hierarchies */
#define FS_EXTENT_FL			0x00080000 /* Extents */
#define FS_DIRECTIO_FL			0x00100000 /* Use direct i/o */
#define FS_RESERVED_FL			0x80000000 /* reserved for ext2 lib */

#define FS_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
#define FS_FL_USER_MODIFIABLE		0x000380FF /* User modifiable flags */


#define SYNC_FILE_RANGE_WAIT_BEFORE	1
#define SYNC_FILE_RANGE_WRITE		2
#define SYNC_FILE_RANGE_WAIT_AFTER	4

#ifdef __KERNEL__

#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/dcache.h>
#include <linux/path.h>
#include <linux/stat.h>
#include <linux/cache.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/prio_tree.h>
#include <linux/init.h>
#include <linux/pid.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/semaphore.h>
#include <linux/fiemap.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

struct export_operations;
struct hd_geometry;
struct iovec;
struct nameidata;
struct kiocb;
struct pipe_inode_info;
struct poll_table_struct;
struct kstatfs;
struct vm_area_struct;
struct vfsmount;
struct cred;

extern void __init inode_init(void);
extern void __init inode_init_early(void);
extern void __init files_init(unsigned long);

extern struct files_stat_struct files_stat;
extern unsigned long get_max_files(void);
extern int sysctl_nr_open;
extern struct inodes_stat_t inodes_stat;
extern int leases_enable, lease_break_time;

struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);
typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
			ssize_t bytes, void *private, int ret,
			bool is_async);
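/*
 * Editor's illustration (not part of fs.h): a sketch of a get_block_t
 * implementation for a hypothetical filesystem with a trivial identity
 * logical-to-physical mapping; a real filesystem would look the block up
 * in its metadata and honour 'create'. Assumes <linux/buffer_head.h>.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	sector_t phys = iblock;			/* assumed 1:1 mapping */

	map_bh(bh_result, inode->i_sb, phys);	/* mark buffer mapped */
	return 0;
}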
/*
 * Attribute flags.  These should be or-ed together to figure out what
 * has been changed!
 */
#define ATTR_MODE	(1 << 0)
#define ATTR_UID	(1 << 1)
#define ATTR_GID	(1 << 2)
#define ATTR_SIZE	(1 << 3)
#define ATTR_ATIME	(1 << 4)
#define ATTR_MTIME	(1 << 5)
#define ATTR_CTIME	(1 << 6)
#define ATTR_ATIME_SET	(1 << 7)
#define ATTR_MTIME_SET	(1 << 8)
#define ATTR_FORCE	(1 << 9) /* Not a change, but a change it */
#define ATTR_ATTR_FLAG	(1 << 10)
#define ATTR_KILL_SUID	(1 << 11)
#define ATTR_KILL_SGID	(1 << 12)
#define ATTR_FILE	(1 << 13)
#define ATTR_KILL_PRIV	(1 << 14)
#define ATTR_OPEN	(1 << 15) /* Truncating from open(O_TRUNC) */
#define ATTR_TIMES_SET	(1 << 16)

/*
 * This is the Inode Attributes structure, used for notify_change().  It
 * uses the above definitions as flags, to know which values have changed.
 * Also, in this manner, a Filesystem can look at only the values it cares
 * about.  Basically, these are the attributes that the VFS layer can
 * request to change from the FS layer.
 *
 * Derek Atkins <warlord@MIT.EDU> 94-10-20
 */
struct iattr {
	unsigned int	ia_valid;
	umode_t		ia_mode;
	uid_t		ia_uid;
	gid_t		ia_gid;
	loff_t		ia_size;
	struct timespec	ia_atime;
	struct timespec	ia_mtime;
	struct timespec	ia_ctime;

	/*
	 * Not an attribute, but auxiliary info for filesystems wanting to
	 * implement an ftruncate() like method.  NOTE: filesystem should
	 * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
	 */
	struct file	*ia_file;
};

/*
 * Includes for diskquotas.
 */
#include <linux/quota.h>

/**
 * enum positive_aop_returns - aop return codes with specific semantics
 *
 * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
 *			    completed, that the page is still locked, and
 *			    should be considered active.  The VM uses this hint
 *			    to return the page to the active list -- it won't
 *			    be a candidate for writeback again in the near
 *			    future.  Other callers must be careful to unlock
 *			    the page if they get this return.  Returned by
 *			    writepage();
 *
 * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
 *			unlocked it and the page might have been truncated.
 *			The caller should back up to acquiring a new page and
 *			trying again.  The aop will be taking reasonable
 *			precautions not to livelock.  If the caller held a page
 *			reference, it should drop it before retrying.  Returned
 *			by readpage().
 *
 * address_space_operation functions return these large constants to indicate
 * special semantics to the caller.  These are much larger than the bytes in a
 * page to allow for functions that return the number of bytes operated on in a
 * given page.
 */

enum positive_aop_returns {
	AOP_WRITEPAGE_ACTIVATE	= 0x80000,
	AOP_TRUNCATED_PAGE	= 0x80001,
};

#define AOP_FLAG_UNINTERRUPTIBLE	0x0001 /* will not do a short write */
#define AOP_FLAG_CONT_EXPAND		0x0002 /* called from cont_expand */
#define AOP_FLAG_NOFS			0x0004 /* used by filesystem to direct
						* helper code (eg buffer layer)
						* to clear GFP_FS from alloc */
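/*
 * Editor's illustration (not part of fs.h): a sketch of changing an
 * inode's owner from inside the kernel by filling a struct iattr and
 * calling notify_change() (declared elsewhere in this header). The
 * function name is hypothetical; the caller must hold i_mutex.
 */
static int my_chown(struct dentry *dentry, uid_t uid)
{
	struct iattr attr = {
		.ia_valid = ATTR_UID,	/* only ia_uid below is meaningful */
		.ia_uid   = uid,
	};

	return notify_change(dentry, &attr);
}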
/*
 * oh the beauties of C type declarations.
 */
struct page;
struct address_space;
struct writeback_control;

struct iov_iter {
	const struct iovec *iov;
	unsigned long nr_segs;
	size_t iov_offset;
	size_t count;
};

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
size_t iov_iter_copy_from_user(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(struct iov_iter *i);

static inline void iov_iter_init(struct iov_iter *i,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count, size_t written)
{
	i->iov = iov;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count + written;

	iov_iter_advance(i, written);
}

static inline size_t iov_iter_count(struct iov_iter *i)
{
	return i->count;
}

/*
 * "descriptor" for what we're up to with a read.
 * This allows us to use the same read code yet
 * have multiple different users of the data that
 * we read from a file.
 *
 * The simplest case just copies the data to user
 * mode.
 */
typedef struct {
	size_t written;
	size_t count;
	union {
		char __user *buf;
		void *data;
	} arg;
	int error;
} read_descriptor_t;

typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
		unsigned long, unsigned long);

struct address_space_operations {
	int (*writepage)(struct page *page, struct writeback_control *wbc);
	int (*readpage)(struct file *, struct page *);
	void (*sync_page)(struct page *);

	/* Write back some dirty pages from this mapping. */
	int (*writepages)(struct address_space *, struct writeback_control *);

	/* Set a page dirty.  Return true if this dirtied it */
	int (*set_page_dirty)(struct page *page);

	int (*readpages)(struct file *filp, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages);

	int (*write_begin)(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata);
	int (*write_end)(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata);

	/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
	sector_t (*bmap)(struct address_space *, sector_t);
	void (*invalidatepage) (struct page *, unsigned long);
	int (*releasepage) (struct page *, gfp_t);
	void (*freepage)(struct page *);
	ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
			loff_t offset, unsigned long nr_segs);
	int (*get_xip_mem)(struct address_space *, pgoff_t, int,
				void **, unsigned long *);
	/* migrate the contents of a page to the specified target */
	int (*migratepage) (struct address_space *,
			struct page *, struct page *);
	int (*launder_page) (struct page *);
	int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
					unsigned long);
	int (*error_remove_page)(struct address_space *, struct page *);
};
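/*
 * Editor's illustration (not part of fs.h): a sketch of setting up an
 * iov_iter over a write(2)-style iovec array before copying data into
 * page-cache pages. The helper name and parameters are hypothetical.
 */
static void my_setup_iter(struct iov_iter *it, const struct iovec *iov,
			  unsigned long nr_segs, size_t count)
{
	/* written = 0: start at the beginning of the first segment */
	iov_iter_init(it, iov, nr_segs, count, 0);
	/* iov_iter_count(it) now reports 'count' bytes remaining */
}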
/*
 * pagecache_write_begin/pagecache_write_end must be used by general code
 * to write into the pagecache.
 */
int pagecache_write_begin(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata);

int pagecache_write_end(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata);

struct backing_dev_info;
struct address_space {
	struct inode		*host;		/* owner: inode, block_device */
	struct radix_tree_root	page_tree;	/* radix tree of all pages */
	spinlock_t		tree_lock;	/* and lock protecting it */
	unsigned int		i_mmap_writable;/* count VM_SHARED mappings */
	struct prio_tree_root	i_mmap;		/* tree of private and shared mappings */
	struct list_head	i_mmap_nonlinear;/* list VM_NONLINEAR mappings */
	spinlock_t		i_mmap_lock;	/* protect tree, count, list */
	unsigned int		truncate_count;	/* Cover race condition with truncate */
	unsigned long		nrpages;	/* number of total pages */
	pgoff_t			writeback_index;/* writeback starts here */
	const struct address_space_operations *a_ops;	/* methods */
	unsigned long		flags;		/* error bits/gfp mask */
	struct backing_dev_info *backing_dev_info; /* device readahead, etc */
	spinlock_t		private_lock;	/* for use by the address_space */
	struct list_head	private_list;	/* ditto */
	struct address_space	*assoc_mapping;	/* ditto */
} __attribute__((aligned(sizeof(long))));
	/*
	 * On most architectures that alignment is already the case; but
	 * must be enforced here for CRIS, to let the least significant bit
	 * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
	 */

struct block_device {
	dev_t			bd_dev;  /* not a kdev_t - it's a search key */
	struct inode *		bd_inode;	/* will die */
	struct super_block *	bd_super;
	int			bd_openers;
	struct mutex		bd_mutex;	/* open/close mutex */
	struct list_head	bd_inodes;
	void *			bd_claiming;
	void *			bd_holder;
	int			bd_holders;
#ifdef CONFIG_SYSFS
	struct list_head	bd_holder_list;
#endif
	struct block_device *	bd_contains;
	unsigned		bd_block_size;
	struct hd_struct *	bd_part;
	/* number of times partitions within this device have been opened. */
	unsigned		bd_part_count;
	int			bd_invalidated;
	struct gendisk *	bd_disk;
	struct list_head	bd_list;
	/*
	 * Private data.  You must have bd_claim'ed the block_device
	 * to use this.  NOTE:  bd_claim allows an owner to claim
	 * the same device multiple times, the owner must take special
	 * care to not mess up bd_private for that case.
	 */
	unsigned long		bd_private;

	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
};

/*
 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
 * radix trees
 */
#define PAGECACHE_TAG_DIRTY	0
#define PAGECACHE_TAG_WRITEBACK	1
#define PAGECACHE_TAG_TOWRITE	2

int mapping_tagged(struct address_space *mapping, int tag);

/*
 * Might pages of this file be mapped into userspace?
 */
static inline int mapping_mapped(struct address_space *mapping)
{
	return	!prio_tree_empty(&mapping->i_mmap) ||
		!list_empty(&mapping->i_mmap_nonlinear);
}
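/*
 * Editor's illustration (not part of fs.h): the canonical pairing of the
 * write_begin/write_end helpers declared above for copying one chunk into
 * the page cache. Names and parameters are hypothetical; the actual data
 * copy and short-copy handling are elided.
 */
static int my_write_chunk(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len)
{
	struct page *page;
	void *fsdata;
	int ret;

	ret = pagecache_write_begin(file, mapping, pos, len, 0,
				    &page, &fsdata);
	if (ret)
		return ret;
	/* ... copy len bytes into page at offset (pos within the page) ... */
	return pagecache_write_end(file, mapping, pos, len, len,
				   page, fsdata);
}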
/*
 * Might pages of this file have been modified in userspace?
 * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
 * marks vma as VM_SHARED if it is shared, and the file was opened for
 * writing i.e. vma may be mprotected writable even if now readonly.
 */
static inline int mapping_writably_mapped(struct address_space *mapping)
{
	return mapping->i_mmap_writable != 0;
}

/*
 * Use sequence counter to get consistent i_size on 32-bit processors.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __NEED_I_SIZE_ORDERED
#define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount)
#else
#define i_size_ordered_init(inode) do { } while (0)
#endif

struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))

struct inode {
	struct hlist_node	i_hash;
	struct list_head	i_wb_list;	/* backing dev IO list */
	struct list_head	i_lru;		/* inode LRU list */
	struct list_head	i_sb_list;
	struct list_head	i_dentry;
	unsigned long		i_ino;
	atomic_t		i_count;
	unsigned int		i_nlink;
	uid_t			i_uid;
	gid_t			i_gid;
	dev_t			i_rdev;
	unsigned int		i_blkbits;
	u64			i_version;
	loff_t			i_size;
#ifdef __NEED_I_SIZE_ORDERED
	seqcount_t		i_size_seqcount;
#endif
	struct timespec		i_atime;
	struct timespec		i_mtime;
	struct timespec		i_ctime;
	blkcnt_t		i_blocks;
	unsigned short		i_bytes;
	umode_t			i_mode;
	spinlock_t		i_lock;	/* i_blocks, i_bytes, maybe i_size */
	struct mutex		i_mutex;
	struct rw_semaphore	i_alloc_sem;
	const struct inode_operations	*i_op;
	const struct file_operations	*i_fop;	/* former ->i_op->default_file_ops */
	struct super_block	*i_sb;
	struct file_lock	*i_flock;
	struct address_space	*i_mapping;
	struct address_space	i_data;
#ifdef CONFIG_QUOTA
	struct dquot		*i_dquot[MAXQUOTAS];
#endif
	struct list_head	i_devices;
	union {
		struct pipe_inode_info	*i_pipe;
		struct block_device	*i_bdev;
		struct cdev		*i_cdev;
	};

	__u32			i_generation;

#ifdef CONFIG_FSNOTIFY
	__u32			i_fsnotify_mask; /* all events this inode cares about */
	struct hlist_head	i_fsnotify_marks;
#endif

	unsigned long		i_state;
	unsigned long		dirtied_when;	/* jiffies of first dirtying */

	unsigned int		i_flags;

#ifdef CONFIG_IMA
	/* protected by i_lock */
	unsigned int		i_readcount; /* struct files open RO */
#endif
	atomic_t		i_writecount;
#ifdef CONFIG_SECURITY
	void			*i_security;
#endif
#ifdef CONFIG_FS_POSIX_ACL
	struct posix_acl	*i_acl;
	struct posix_acl	*i_default_acl;
#endif
	void			*i_private; /* fs or device private pointer */
};

static inline int inode_unhashed(struct inode *inode)
{
	return hlist_unhashed(&inode->i_hash);
}

/*
 * inode->i_mutex nesting subclasses for the lock validator:
 *
 * 0: the object of the current VFS operation
 * 1: parent
 * 2: child/target
 * 3: quota file
 *
 * The locking order between these classes is
 * parent -> child -> normal -> xattr -> quota
 */
enum inode_i_mutex_lock_class
{
	I_MUTEX_NORMAL,
	I_MUTEX_PARENT,
	I_MUTEX_CHILD,
	I_MUTEX_XATTR,
	I_MUTEX_QUOTA
};
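/*
 * Editor's illustration (not part of fs.h): taking two i_mutexes in the
 * documented parent -> child order, using the lockdep subclasses above so
 * the lock validator accepts the nesting. The helper and its dir/inode
 * pair are hypothetical.
 */
static void my_lock_pair(struct inode *dir, struct inode *inode)
{
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
}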
/*
 * NOTE: in a 32bit arch with a preemptable kernel and
 * a UP compile the i_size_read/write must be atomic
 * with respect to the local cpu (unlike with preempt disabled),
 * but they don't need to be atomic with respect to other cpus like in
 * true SMP (so they either need to locally disable irq around
 * the read, or for example on x86 they can still be implemented as a
 * cmpxchg8b without the need of the lock prefix). For SMP compiles
 * and 64bit archs it makes no difference if preempt is enabled or not.
 */
static inline loff_t i_size_read(const struct inode *inode)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	loff_t i_size;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&inode->i_size_seqcount);
		i_size = inode->i_size;
	} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
	return i_size;
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
	loff_t i_size;

	preempt_disable();
	i_size = inode->i_size;
	preempt_enable();
	return i_size;
#else
	return inode->i_size;
#endif
}

/*
 * NOTE: unlike i_size_read(), i_size_write() does need locking around it
 * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount
 * can be lost, resulting in subsequent i_size_read() calls spinning forever.
 */
static inline void i_size_write(struct inode *inode, loff_t i_size)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	write_seqcount_begin(&inode->i_size_seqcount);
	inode->i_size = i_size;
	write_seqcount_end(&inode->i_size_seqcount);
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
	preempt_disable();
	inode->i_size = i_size;
	preempt_enable();
#else
	inode->i_size = i_size;
#endif
}

static inline unsigned iminor(const struct inode *inode)
{
	return MINOR(inode->i_rdev);
}

static inline unsigned imajor(const struct inode *inode)
{
	return MAJOR(inode->i_rdev);
}

extern struct block_device *I_BDEV(struct inode *inode);

struct fown_struct {
	rwlock_t lock;          /* protects pid, uid, euid fields */
	struct pid *pid;	/* pid or -pgrp where SIGIO should be sent */
	enum pid_type pid_type;	/* Kind of process group SIGIO should be sent to */
	uid_t uid, euid;	/* uid/euid of process setting the owner */
	int signum;		/* posix.1b rt signal to be delivered on IO */
};

/*
 * Track a single file's readahead state
 */
struct file_ra_state {
	pgoff_t start;			/* where readahead started */
	unsigned int size;		/* # of readahead pages */
	unsigned int async_size;	/* do asynchronous readahead when
					   there are only # of pages ahead */

	unsigned int ra_pages;		/* Maximum readahead window */
	unsigned int mmap_miss;		/* Cache miss stat for mmap accesses */
	loff_t prev_pos;		/* Cache last read() position */
};
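/*
 * Editor's illustration (not part of fs.h): extending a file's size under
 * i_mutex, as the comment on i_size_write() above requires, then marking
 * the inode dirty with the helper defined later in this header. The
 * function name and new_size are hypothetical.
 */
static void my_set_size(struct inode *inode, loff_t new_size)
{
	mutex_lock(&inode->i_mutex);
	i_size_write(inode, new_size);	/* seqcount-protected on 32-bit SMP */
	mutex_unlock(&inode->i_mutex);
	mark_inode_dirty(inode);
}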
/*
 * Check if @index falls in the readahead windows.
 */
static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
{
	return (index >= ra->start &&
		index <  ra->start + ra->size);
}

#define FILE_MNT_WRITE_TAKEN	1
#define FILE_MNT_WRITE_RELEASED	2

struct file {
	/*
	 * fu_list becomes invalid after file_free is called and queued via
	 * fu_rcuhead for RCU freeing
	 */
	union {
		struct list_head	fu_list;
		struct rcu_head 	fu_rcuhead;
	} f_u;
	struct path		f_path;
#define f_dentry	f_path.dentry
#define f_vfsmnt	f_path.mnt
	const struct file_operations	*f_op;
	spinlock_t		f_lock;  /* f_ep_links, f_flags, no IRQ */
#ifdef CONFIG_SMP
	int			f_sb_list_cpu;
#endif
	atomic_long_t		f_count;
	unsigned int 		f_flags;
	fmode_t			f_mode;
	loff_t			f_pos;
	struct fown_struct	f_owner;
	const struct cred	*f_cred;
	struct file_ra_state	f_ra;

	u64			f_version;
#ifdef CONFIG_SECURITY
	void			*f_security;
#endif
	/* needed for tty driver, and maybe others */
	void			*private_data;

#ifdef CONFIG_EPOLL
	/* Used by fs/eventpoll.c to link all the hooks to this file */
	struct list_head	f_ep_links;
#endif /* #ifdef CONFIG_EPOLL */
	struct address_space	*f_mapping;
#ifdef CONFIG_DEBUG_WRITECOUNT
	unsigned long f_mnt_write_state;
#endif
};

#define get_file(x)	atomic_long_inc(&(x)->f_count)
#define fput_atomic(x)	atomic_long_add_unless(&(x)->f_count, -1, 1)
#define file_count(x)	atomic_long_read(&(x)->f_count)

#ifdef CONFIG_DEBUG_WRITECOUNT
static inline void file_take_write(struct file *f)
{
	WARN_ON(f->f_mnt_write_state != 0);
	f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN;
}
static inline void file_release_write(struct file *f)
{
	f->f_mnt_write_state |= FILE_MNT_WRITE_RELEASED;
}
static inline void file_reset_write(struct file *f)
{
	f->f_mnt_write_state = 0;
}
static inline void file_check_state(struct file *f)
{
	/*
	 * At this point, either both or neither of these bits
	 * should be set.
	 */
	WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN);
	WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_RELEASED);
}
static inline int file_check_writeable(struct file *f)
{
	if (f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN)
		return 0;
	printk(KERN_WARNING "writeable file with no "
			    "mnt_want_write()\n");
	WARN_ON(1);
	return -EINVAL;
}
#else /* !CONFIG_DEBUG_WRITECOUNT */
static inline void file_take_write(struct file *filp) {}
static inline void file_release_write(struct file *filp) {}
static inline void file_reset_write(struct file *filp) {}
static inline void file_check_state(struct file *filp) {}
static inline int file_check_writeable(struct file *filp)
{
	return 0;
}
#endif /* CONFIG_DEBUG_WRITECOUNT */

#define MAX_NON_LFS	((1UL<<31) - 1)

/* Page cache limit. The filesystems should put that into their s_maxbytes
   limits, otherwise bad things can happen in VM. */
#if BITS_PER_LONG==32
#define MAX_LFS_FILESIZE	(((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
#elif BITS_PER_LONG==64
#define MAX_LFS_FILESIZE	0x7fffffffffffffffUL
#endif

#define FL_POSIX	1
#define FL_FLOCK	2
#define FL_ACCESS	8	/* not trying to lock, just looking */
#define FL_EXISTS	16	/* when unlocking, test for existence */
#define FL_LEASE	32	/* lease held on this file */
#define FL_CLOSE	64	/* unlock on close */
#define FL_SLEEP	128	/* A blocking lock */

/*
 * Special return value from posix_lock_file() and vfs_lock_file() for
 * asynchronous locking.
 */
#define FILE_LOCK_DEFERRED 1

/*
 * The POSIX file lock owner is determined by
 * the "struct files_struct" in the thread group
 * (or NULL for no owner - BSD locks).
 *
 * Lockd stuffs a "host" pointer into this.
 */
typedef struct files_struct *fl_owner_t;

struct file_lock_operations {
	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
	void (*fl_release_private)(struct file_lock *);
};

struct lock_manager_operations {
	int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
	void (*fl_notify)(struct file_lock *);	/* unblock callback */
	int (*fl_grant)(struct file_lock *, struct file_lock *, int);
	void (*fl_release_private)(struct file_lock *);
	void (*fl_break)(struct file_lock *);
	int (*fl_mylease)(struct file_lock *, struct file_lock *);
	int (*fl_change)(struct file_lock **, int);
};

struct lock_manager {
	struct list_head list;
};

void locks_start_grace(struct lock_manager *);
void locks_end_grace(struct lock_manager *);
int locks_in_grace(void);

/* that will die - we need it for nfs_lock_info */
#include <linux/nfs_fs_i.h>

struct file_lock {
	struct file_lock *fl_next;	/* singly linked list for this inode  */
	struct list_head fl_link;	/* doubly linked list of all locks */
	struct list_head fl_block;	/* circular list of blocked processes */
	fl_owner_t fl_owner;
	unsigned char fl_flags;
	unsigned char fl_type;
	unsigned int fl_pid;
	struct pid *fl_nspid;
	wait_queue_head_t fl_wait;
	struct file *fl_file;
	loff_t fl_start;
	loff_t fl_end;

	struct fasync_struct *	fl_fasync; /* for lease break notifications */
	unsigned long fl_break_time;	/* for nonblocking lease breaks */

	const struct file_lock_operations *fl_ops;	/* Callbacks for filesystems */
	const struct lock_manager_operations *fl_lmops;	/* Callbacks for lockmanagers */
	union {
		struct nfs_lock_info	nfs_fl;
		struct nfs4_lock_info	nfs4_fl;
		struct {
			struct list_head link;	/* link in AFS vnode's pending_locks list */
			int state;		/* state of grant or error if -ve */
		} afs;
	} fl_u;
};

/* The following constant reflects the upper bound of the file/locking space */
#ifndef OFFSET_MAX
#define INT_LIMIT(x)	(~((x)1 << (sizeof(x)*8 - 1)))
#define OFFSET_MAX	INT_LIMIT(loff_t)
#define OFFT_OFFSET_MAX	INT_LIMIT(off_t)
#endif

#include <linux/fcntl.h>

extern void send_sigio(struct fown_struct *fown, int fd, int band);
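/*
 * Editor's illustration (not part of fs.h): a userspace sketch of taking
 * a whole-file POSIX write lock with fcntl(2); the kernel side lands in
 * fcntl_setlk()/posix_lock_file() below. Assumes <fcntl.h>; the fd is a
 * hypothetical open file.
 */
int lock_whole_file(int fd)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* 0 = to end of file */
	};

	return fcntl(fd, F_SETLKW, &fl);	/* block until granted */
}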
#ifdef CONFIG_FILE_LOCKING
extern int fcntl_getlk(struct file *, struct flock __user *);
extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
			struct flock __user *);

#if BITS_PER_LONG == 32
extern int fcntl_getlk64(struct file *, struct flock64 __user *);
extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
			struct flock64 __user *);
#endif

extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
extern int fcntl_getlease(struct file *filp);

/* fs/locks.c */
void locks_free_lock(struct file_lock *fl);
extern void locks_init_lock(struct file_lock *);
extern struct file_lock * locks_alloc_lock(void);
extern void locks_copy_lock(struct file_lock *, struct file_lock *);
extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_flock(struct file *);
extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
extern int posix_lock_file_wait(struct file *, struct file_lock *);
extern int posix_unblock_lock(struct file *, struct file_lock *);
extern int vfs_test_lock(struct file *, struct file_lock *);
extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags);
extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **);
extern int vfs_setlease(struct file *, long, struct file_lock **);
extern int lease_modify(struct file_lock **, int);
extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
extern void lock_flocks(void);
extern void unlock_flocks(void);
#else /* !CONFIG_FILE_LOCKING */
static inline int fcntl_getlk(struct file *file, struct flock __user *user)
{
	return -EINVAL;
}

static inline int fcntl_setlk(unsigned int fd, struct file *file,
			      unsigned int cmd, struct flock __user *user)
{
	return -EACCES;
}

#if BITS_PER_LONG == 32
static inline int fcntl_getlk64(struct file *file, struct flock64 __user *user)
{
	return -EINVAL;
}

static inline int fcntl_setlk64(unsigned int fd, struct file *file,
				unsigned int cmd, struct flock64 __user *user)
{
	return -EACCES;
}
#endif
static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	return 0;
}

static inline int fcntl_getlease(struct file *filp)
{
	return 0;
}

static inline void locks_init_lock(struct file_lock *fl)
{
	return;
}

static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	return;
}

static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	return;
}

static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	return;
}

static inline void locks_remove_flock(struct file *filp)
{
	return;
}

static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
{
	return;
}
static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
				  struct file_lock *conflock)
{
	return -ENOLCK;
}

static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	return -ENOLCK;
}

static inline int posix_unblock_lock(struct file *filp,
				     struct file_lock *waiter)
{
	return -ENOENT;
}

static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	return 0;
}

static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
				struct file_lock *fl, struct file_lock *conf)
{
	return -ENOLCK;
}

static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	return 0;
}

static inline int flock_lock_file_wait(struct file *filp,
				       struct file_lock *request)
{
	return -ENOLCK;
}

static inline int __break_lease(struct inode *inode, unsigned int mode)
{
	return 0;
}

static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	return;
}

static inline int generic_setlease(struct file *filp, long arg,
				   struct file_lock **flp)
{
	return -EINVAL;
}

static inline int vfs_setlease(struct file *filp, long arg,
			       struct file_lock **lease)
{
	return -EINVAL;
}

static inline int lease_modify(struct file_lock **before, int arg)
{
	return -EINVAL;
}

static inline int lock_may_read(struct inode *inode, loff_t start,
				unsigned long len)
{
	return 1;
}

static inline int lock_may_write(struct inode *inode, loff_t start,
				 unsigned long len)
{
	return 1;
}

static inline void lock_flocks(void)
{
}

static inline void unlock_flocks(void)
{
}

#endif /* !CONFIG_FILE_LOCKING */


struct fasync_struct {
	spinlock_t		fa_lock;
	int			magic;
	int			fa_fd;
	struct fasync_struct	*fa_next; /* singly linked list */
	struct file		*fa_file;
	struct rcu_head		fa_rcu;
};

#define FASYNC_MAGIC 0x4601

/* SMP safe fasync helpers: */
extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
extern int fasync_remove_entry(struct file *, struct fasync_struct **);
extern struct fasync_struct *fasync_alloc(void);
extern void fasync_free(struct fasync_struct *);

/* can be called from interrupts */
extern void kill_fasync(struct fasync_struct **, int, int);

extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
extern int f_setown(struct file *filp, unsigned long arg, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
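/*
 * Editor's illustration (not part of fs.h): the classic driver-side use
 * of the fasync helpers above -- a ->fasync handler plus the kick that
 * notifies readers when new data arrives. my_async_queue and the
 * function names are hypothetical.
 */
static struct fasync_struct *my_async_queue;

static int my_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &my_async_queue);
}

/* called when new data arrives, e.g. from an interrupt handler */
static void my_notify_readers(void)
{
	kill_fasync(&my_async_queue, SIGIO, POLL_IN);
}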
/*
 * Umount options
 */

#define MNT_FORCE	0x00000001	/* Attempt to forcibly umount */
#define MNT_DETACH	0x00000002	/* Just detach from the tree */
#define MNT_EXPIRE	0x00000004	/* Mark for expiry */
#define UMOUNT_NOFOLLOW	0x00000008	/* Don't follow symlink on umount */
#define UMOUNT_UNUSED	0x80000000	/* Flag guaranteed to be unused */

extern struct list_head super_blocks;
extern spinlock_t sb_lock;

struct super_block {
	struct list_head	s_list;		/* Keep this first */
	dev_t			s_dev;		/* search index; _not_ kdev_t */
	unsigned char		s_dirt;
	unsigned char		s_blocksize_bits;
	unsigned long		s_blocksize;
	loff_t			s_maxbytes;	/* Max file size */
	struct file_system_type	*s_type;
	const struct super_operations	*s_op;
	const struct dquot_operations	*dq_op;
	const struct quotactl_ops	*s_qcop;
	const struct export_operations *s_export_op;
	unsigned long		s_flags;
	unsigned long		s_magic;
	struct dentry		*s_root;
	struct rw_semaphore	s_umount;
	struct mutex		s_lock;
	int			s_count;
	atomic_t		s_active;
#ifdef CONFIG_SECURITY
	void                    *s_security;
#endif
	const struct xattr_handler **s_xattr;

	struct list_head	s_inodes;	/* all inodes */
	struct hlist_head	s_anon;		/* anonymous dentries for (nfs) exporting */
#ifdef CONFIG_SMP
	struct list_head __percpu *s_files;
#else
	struct list_head	s_files;
#endif
	/* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
	struct list_head	s_dentry_lru;	/* unused dentry lru */
	int			s_nr_dentry_unused;	/* # of dentry on lru */

	struct block_device	*s_bdev;
	struct backing_dev_info *s_bdi;
	struct mtd_info		*s_mtd;
	struct list_head	s_instances;
	struct quota_info	s_dquot;	/* Diskquota specific options */

	int			s_frozen;
	wait_queue_head_t	s_wait_unfrozen;

	char s_id[32];				/* Informational name */

	void 			*s_fs_info;	/* Filesystem private info */
	fmode_t			s_mode;

	/* Granularity of c/m/atime in ns.
	   Cannot be worse than a second */
	u32		   s_time_gran;

	/*
	 * The next field is for VFS *only*. No filesystems have any business
	 * even looking at it. You had been warned.
	 */
	struct mutex s_vfs_rename_mutex;	/* Kludge */

	/*
	 * Filesystem subtype.  If non-empty the filesystem type field
	 * in /proc/mounts will be "type.subtype"
	 */
	char *s_subtype;

	/*
	 * Saved mount options for lazy filesystems using
	 * generic_show_options()
	 */
	char __rcu *s_options;
};

extern struct timespec current_fs_time(struct super_block *sb);

/*
 * Snapshotting support.
 */
enum {
	SB_UNFROZEN = 0,
	SB_FREEZE_WRITE	= 1,
	SB_FREEZE_TRANS = 2,
};

#define vfs_check_frozen(sb, level) \
	wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))

#define get_fs_excl() atomic_inc(&current->fs_excl)
#define put_fs_excl() atomic_dec(&current->fs_excl)
#define has_fs_excl() atomic_read(&current->fs_excl)

#define is_owner_or_cap(inode)	\
	((current_fsuid() == (inode)->i_uid) || capable(CAP_FOWNER))

/* not quite ready to be deprecated, but... */
extern void lock_super(struct super_block *);
extern void unlock_super(struct super_block *);
/*
 * VFS helper functions..
 */
extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
extern int vfs_mkdir(struct inode *, struct dentry *, int);
extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
extern int vfs_symlink(struct inode *, struct dentry *, const char *);
extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
extern int vfs_rmdir(struct inode *, struct dentry *);
extern int vfs_unlink(struct inode *, struct dentry *);
extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);

/*
 * VFS dentry helper functions.
 */
extern void dentry_unhash(struct dentry *dentry);

/*
 * VFS file helper functions.
 */
extern int file_permission(struct file *, int);
extern void inode_init_owner(struct inode *inode, const struct inode *dir,
			mode_t mode);
/*
 * VFS FS_IOC_FIEMAP helper definitions.
 */
struct fiemap_extent_info {
	unsigned int fi_flags;		/* Flags as passed from user */
	unsigned int fi_extents_mapped;	/* Number of mapped extents */
	unsigned int fi_extents_max;	/* Size of fiemap_extent array */
	struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
						 * array */
};
int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
			    u64 phys, u64 len, u32 flags);
int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);

/*
 * File types
 *
 * NOTE! These match bits 12..15 of stat.st_mode
 * (ie "(i_mode >> 12) & 15").
 */
#define DT_UNKNOWN	0
#define DT_FIFO		1
#define DT_CHR		2
#define DT_DIR		4
#define DT_BLK		6
#define DT_REG		8
#define DT_LNK		10
#define DT_SOCK		12
#define DT_WHT		14

/*
 * This is the "filldir" function type, used by readdir() to let
 * the kernel specify what kind of dirent layout it wants to have.
 * This allows the kernel to read directories into kernel space or
 * to have different dirent layouts depending on the binary type.
 */
typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
struct block_device_operations;

/* These macros are for out of kernel modules to test that
 * the kernel supports the unlocked_ioctl and compat_ioctl
 * fields in struct file_operations. */
#define HAVE_COMPAT_IOCTL 1
#define HAVE_UNLOCKED_IOCTL 1
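/*
 * Editor's illustration (not part of fs.h): a sketch of a minimal
 * ->readdir implementation that emits just "." and ".." through the
 * filldir_t callback above, using the DT_* types. A toy filesystem's
 * hypothetical handler; filldir returns nonzero when its buffer is full.
 */
static int my_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_dentry->d_inode;

	if (filp->f_pos == 0) {
		if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR))
			return 0;
		filp->f_pos++;
	}
	if (filp->f_pos == 1) {
		if (filldir(dirent, "..", 2, filp->f_pos,
			    filp->f_dentry->d_parent->d_inode->i_ino, DT_DIR))
			return 0;
		filp->f_pos++;
	}
	return 0;
}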
/*
 * NOTE:
 * all file operations except setlease can be called without
 * the big kernel lock held in all filesystems.
 */
struct file_operations {
	struct module *owner;
	loff_t (*llseek) (struct file *, loff_t, int);
	ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
	ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
	ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
	ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
	int (*readdir) (struct file *, void *, filldir_t);
	unsigned int (*poll) (struct file *, struct poll_table_struct *);
	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
	int (*mmap) (struct file *, struct vm_area_struct *);
	int (*open) (struct inode *, struct file *);
	int (*flush) (struct file *, fl_owner_t id);
	int (*release) (struct inode *, struct file *);
	int (*fsync) (struct file *, int datasync);
	int (*aio_fsync) (struct kiocb *, int datasync);
	int (*fasync) (int, struct file *, int);
	int (*lock) (struct file *, int, struct file_lock *);
	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
	unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
	int (*check_flags)(int);
	int (*flock) (struct file *, int, struct file_lock *);
	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
	int (*setlease)(struct file *, long, struct file_lock **);
};
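/*
 * Editor's illustration (not part of fs.h): a character driver wiring a
 * few handlers into file_operations. my_open and my_fasync are the
 * hypothetical sketches from earlier in this listing; my_read is likewise
 * hypothetical. Methods left unset default to NULL.
 */
static const struct file_operations my_fops = {
	.owner	= THIS_MODULE,
	.open	= my_open,
	.read	= my_read,
	.fasync	= my_fasync,
};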
struct inode_operations {
	int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
	struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
	int (*link) (struct dentry *,struct inode *,struct dentry *);
	int (*unlink) (struct inode *,struct dentry *);
	int (*symlink) (struct inode *,struct dentry *,const char *);
	int (*mkdir) (struct inode *,struct dentry *,int);
	int (*rmdir) (struct inode *,struct dentry *);
	int (*mknod) (struct inode *,struct dentry *,int,dev_t);
	int (*rename) (struct inode *, struct dentry *,
			struct inode *, struct dentry *);
	int (*readlink) (struct dentry *, char __user *,int);
	void * (*follow_link) (struct dentry *, struct nameidata *);
	void (*put_link) (struct dentry *, struct nameidata *, void *);
	void (*truncate) (struct inode *);
	int (*permission) (struct inode *, int);
	int (*check_acl)(struct inode *, int);
	int (*setattr) (struct dentry *, struct iattr *);
	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
	ssize_t (*listxattr) (struct dentry *, char *, size_t);
	int (*removexattr) (struct dentry *, const char *);
	void (*truncate_range)(struct inode *, loff_t, loff_t);
	long (*fallocate)(struct inode *inode, int mode, loff_t offset,
			  loff_t len);
	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
		      u64 len);
};

struct seq_file;

ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
				unsigned long nr_segs, unsigned long fast_segs,
				struct iovec *fast_pointer,
				struct iovec **ret_pointer);

extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
		unsigned long, loff_t *);
extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
		unsigned long, loff_t *);

struct super_operations {
	struct inode *(*alloc_inode)(struct super_block *sb);
	void (*destroy_inode)(struct inode *);

	void (*dirty_inode) (struct inode *);
	int (*write_inode) (struct inode *, struct writeback_control *wbc);
	int (*drop_inode) (struct inode *);
	void (*evict_inode) (struct inode *);
	void (*put_super) (struct super_block *);
	void (*write_super) (struct super_block *);
	int (*sync_fs)(struct super_block *sb, int wait);
	int (*freeze_fs) (struct super_block *);
	int (*unfreeze_fs) (struct super_block *);
	int (*statfs) (struct dentry *, struct kstatfs *);
	int (*remount_fs) (struct super_block *, int *, char *);
	void (*umount_begin) (struct super_block *);

	int (*show_options)(struct seq_file *, struct vfsmount *);
	int (*show_stats)(struct seq_file *, struct vfsmount *);
#ifdef CONFIG_QUOTA
	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
#endif
	int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
};

/*
 * Inode state bits.  Protected by inode_lock.
 *
 * Three bits determine the dirty state of the inode, I_DIRTY_SYNC,
 * I_DIRTY_DATASYNC and I_DIRTY_PAGES.
 *
 * Four bits define the lifetime of an inode.  Initially, inodes are I_NEW,
 * until that flag is cleared.  I_WILL_FREE, I_FREEING and I_CLEAR are set at
 * various stages of removing an inode.
 *
 * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
 *
 * I_DIRTY_SYNC		Inode is dirty, but doesn't have to be written on
 *			fdatasync().  i_atime is the usual cause.
 * I_DIRTY_DATASYNC	Data-related inode changes pending. We keep track of
 *			these changes separately from I_DIRTY_SYNC so that we
 *			don't have to write inode on fdatasync() when only
 *			mtime has changed in it.
 * I_DIRTY_PAGES	Inode has dirty pages.  Inode itself may be clean.
 * I_NEW		Serves as both a mutex and completion notification.
 *			New inodes set I_NEW.  If two processes both create
 *			the same inode, one of them will release its inode and
 *			wait for I_NEW to be released before returning.
 *			Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
 *			also cause waiting on I_NEW, without I_NEW actually
 *			being set.  find_inode() uses this to prevent returning
 *			nearly-dead inodes.
 * I_WILL_FREE		Must be set when calling write_inode_now() if i_count
 *			is zero.  I_FREEING must be set when I_WILL_FREE is
 *			cleared.
 * I_FREEING		Set when inode is about to be freed but still has dirty
 *			pages or buffers attached or the inode itself is still
 *			dirty.
 * I_CLEAR		Added by end_writeback().  In this state the inode is
 *			clean and can be destroyed.  Inode keeps I_FREEING.
 *
 *			Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
 *			prohibited for many purposes.  iget() must wait for
 *			the inode to be completely released, then create it
 *			anew.
 *			Other functions will just ignore such inodes,
 *			if appropriate.  I_NEW is used for waiting.
 *
 * I_SYNC		Synchronized write of dirty inode data.  The bit is
 *			set during data writeback, and cleared with a wakeup
 *			on the bit address once it is done.
 *
 * Q: What is the difference between I_WILL_FREE and I_FREEING?
 */
#define I_DIRTY_SYNC		(1 << 0)
#define I_DIRTY_DATASYNC	(1 << 1)
#define I_DIRTY_PAGES		(1 << 2)
#define __I_NEW			3
#define I_NEW			(1 << __I_NEW)
#define I_WILL_FREE		(1 << 4)
#define I_FREEING		(1 << 5)
#define I_CLEAR			(1 << 6)
#define __I_SYNC		7
#define I_SYNC			(1 << __I_SYNC)
#define I_REFERENCED		(1 << 8)

#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)

extern void __mark_inode_dirty(struct inode *, int);
static inline void mark_inode_dirty(struct inode *inode)
{
	__mark_inode_dirty(inode, I_DIRTY);
}

static inline void mark_inode_dirty_sync(struct inode *inode)
{
	__mark_inode_dirty(inode, I_DIRTY_SYNC);
}

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
static inline void inc_nlink(struct inode *inode)
{
	inode->i_nlink++;
}

static inline void inode_inc_link_count(struct inode *inode)
{
	inc_nlink(inode);
	mark_inode_dirty(inode);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
static inline void drop_nlink(struct inode *inode)
{
	inode->i_nlink--;
}

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
static inline void clear_nlink(struct inode *inode)
{
	inode->i_nlink = 0;
}

static inline void inode_dec_link_count(struct inode *inode)
{
	drop_nlink(inode);
	mark_inode_dirty(inode);
}
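/*
 * Editor's illustration (not part of fs.h): the i_nlink bookkeeping a
 * filesystem's ->link method would typically do after adding a new
 * directory entry. The helper is hypothetical; entry creation and
 * locking are elided. Assumes CURRENT_TIME from <linux/time.h>.
 */
static void my_account_new_link(struct inode *inode)
{
	inode->i_ctime = CURRENT_TIME;	/* link count change updates ctime */
	inode_inc_link_count(inode);	/* inc_nlink + mark_inode_dirty */
}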
/**
 * inode_inc_iversion - increments i_version
 * @inode: inode that needs to be updated
 *
 * Every time the inode is modified, the i_version field will be incremented.
 * The filesystem has to be mounted with the i_version flag.
 */

static inline void inode_inc_iversion(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode->i_version++;
	spin_unlock(&inode->i_lock);
}

extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
static inline void file_accessed(struct file *file)
{
	if (!(file->f_flags & O_NOATIME))
		touch_atime(file->f_path.mnt, file->f_path.dentry);
}

int sync_inode(struct inode *inode, struct writeback_control *wbc);
int sync_inode_metadata(struct inode *inode, int wait);

struct file_system_type {
	const char *name;
	int fs_flags;
	int (*get_sb) (struct file_system_type *, int,
		       const char *, void *, struct vfsmount *);
	struct dentry *(*mount) (struct file_system_type *, int,
		       const char *, void *);
	void (*kill_sb) (struct super_block *);
	struct module *owner;
	struct file_system_type * next;
	struct list_head fs_supers;

	struct lock_class_key s_lock_key;
	struct lock_class_key s_umount_key;
	struct lock_class_key s_vfs_rename_key;

	struct lock_class_key i_lock_key;
	struct lock_class_key i_mutex_key;
	struct lock_class_key i_mutex_dir_key;
	struct lock_class_key i_alloc_sem_key;
};

extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
	void *data, int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int));
extern int get_sb_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt);
extern struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int));
extern int get_sb_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt);
extern struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int));
extern int get_sb_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt);
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
void deactivate_locked_super(struct super_block *sb);
int set_anon_super(struct super_block *s, void *data);
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			void *data);
extern struct dentry *mount_pseudo(struct file_system_type *, char *,
	const struct super_operations *ops, unsigned long);
extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
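/*
 * Editor's illustration (not part of fs.h): declaring a block-based
 * filesystem type that mounts through mount_bdev(). myfs_mount,
 * myfs_fill_super and "myfs" are hypothetical; THIS_MODULE needs
 * <linux/module.h>, and module init/exit glue is elided.
 */
static struct dentry *myfs_mount(struct file_system_type *fs_type,
				 int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, myfs_fill_super);
}

static struct file_system_type myfs_type = {
	.owner		= THIS_MODULE,
	.name		= "myfs",
	.mount		= myfs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};

/* in module init: register_filesystem(&myfs_type); (declared below) */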
int sb_is_dirty(struct super_block *sb) 1839 { 1840 return sb->s_dirt; 1841 } 1842 1843 /* Alas, no aliases. Too much hassle with bringing module.h everywhere */ 1844 #define fops_get(fops) \ 1845 (((fops) && try_module_get((fops)->owner) ? (fops) : NULL)) 1846 #define fops_put(fops) \ 1847 do { if (fops) module_put((fops)->owner); } while(0) 1848 1849 extern int register_filesystem(struct file_system_type *); 1850 extern int unregister_filesystem(struct file_system_type *); 1851 extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); 1852 #define kern_mount(type) kern_mount_data(type, NULL) 1853 extern int may_umount_tree(struct vfsmount *); 1854 extern int may_umount(struct vfsmount *); 1855 extern long do_mount(char *, char *, char *, unsigned long, void *); 1856 extern struct vfsmount *collect_mounts(struct path *); 1857 extern void drop_collected_mounts(struct vfsmount *); 1858 extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, 1859 struct vfsmount *); 1860 extern int vfs_statfs(struct path *, struct kstatfs *); 1861 extern int statfs_by_dentry(struct dentry *, struct kstatfs *); 1862 extern int freeze_super(struct super_block *super); 1863 extern int thaw_super(struct super_block *super); 1864 1865 extern int current_umask(void); 1866 1867 /* /sys/fs */ 1868 extern struct kobject *fs_kobj; 1869 1870 #define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK) 1871 extern int rw_verify_area(int, struct file *, loff_t *, size_t); 1872 1873 #define FLOCK_VERIFY_READ 1 1874 #define FLOCK_VERIFY_WRITE 2 1875 1876 #ifdef CONFIG_FILE_LOCKING 1877 extern int locks_mandatory_locked(struct inode *); 1878 extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t); 1879 1880 /* 1881 * Candidates for mandatory locking have the setgid bit set 1882 * but no group execute bit - an otherwise meaningless combination. 1883 */ 1884 1885 static inline int __mandatory_lock(struct inode *ino) 1886 { 1887 return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID; 1888 } 1889 1890 /* 1891 * ... and these candidates should be on MS_MANDLOCK mounted fs, 1892 * otherwise these will be advisory locks 1893 */ 1894 1895 static inline int mandatory_lock(struct inode *ino) 1896 { 1897 return IS_MANDLOCK(ino) && __mandatory_lock(ino); 1898 } 1899 1900 static inline int locks_verify_locked(struct inode *inode) 1901 { 1902 if (mandatory_lock(inode)) 1903 return locks_mandatory_locked(inode); 1904 return 0; 1905 } 1906 1907 static inline int locks_verify_truncate(struct inode *inode, 1908 struct file *filp, 1909 loff_t size) 1910 { 1911 if (inode->i_flock && mandatory_lock(inode)) 1912 return locks_mandatory_area( 1913 FLOCK_VERIFY_WRITE, inode, filp, 1914 size < inode->i_size ? size : inode->i_size, 1915 (size < inode->i_size ? 
inode->i_size - size 1916 : size - inode->i_size) 1917 ); 1918 return 0; 1919 } 1920 1921 static inline int break_lease(struct inode *inode, unsigned int mode) 1922 { 1923 if (inode->i_flock) 1924 return __break_lease(inode, mode); 1925 return 0; 1926 } 1927 #else /* !CONFIG_FILE_LOCKING */ 1928 static inline int locks_mandatory_locked(struct inode *inode) 1929 { 1930 return 0; 1931 } 1932 1933 static inline int locks_mandatory_area(int rw, struct inode *inode, 1934 struct file *filp, loff_t offset, 1935 size_t count) 1936 { 1937 return 0; 1938 } 1939 1940 static inline int __mandatory_lock(struct inode *inode) 1941 { 1942 return 0; 1943 } 1944 1945 static inline int mandatory_lock(struct inode *inode) 1946 { 1947 return 0; 1948 } 1949 1950 static inline int locks_verify_locked(struct inode *inode) 1951 { 1952 return 0; 1953 } 1954 1955 static inline int locks_verify_truncate(struct inode *inode, struct file *filp, 1956 size_t size) 1957 { 1958 return 0; 1959 } 1960 1961 static inline int break_lease(struct inode *inode, unsigned int mode) 1962 { 1963 return 0; 1964 } 1965 1966 #endif /* CONFIG_FILE_LOCKING */ 1967 1968 /* fs/open.c */ 1969 1970 extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, 1971 struct file *filp); 1972 extern int do_fallocate(struct file *file, int mode, loff_t offset, 1973 loff_t len); 1974 extern long do_sys_open(int dfd, const char __user *filename, int flags, 1975 int mode); 1976 extern struct file *filp_open(const char *, int, int); 1977 extern struct file * dentry_open(struct dentry *, struct vfsmount *, int, 1978 const struct cred *); 1979 extern int filp_close(struct file *, fl_owner_t id); 1980 extern char * getname(const char __user *); 1981 1982 /* fs/ioctl.c */ 1983 1984 extern int ioctl_preallocate(struct file *filp, void __user *argp); 1985 1986 /* fs/dcache.c */ 1987 extern void __init vfs_caches_init_early(void); 1988 extern void __init vfs_caches_init(unsigned long); 1989 1990 extern struct kmem_cache *names_cachep; 1991 1992 #define __getname_gfp(gfp) kmem_cache_alloc(names_cachep, (gfp)) 1993 #define __getname() __getname_gfp(GFP_KERNEL) 1994 #define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) 1995 #ifndef CONFIG_AUDITSYSCALL 1996 #define putname(name) __putname(name) 1997 #else 1998 extern void putname(const char *name); 1999 #endif 2000 2001 #ifdef CONFIG_BLOCK 2002 extern int register_blkdev(unsigned int, const char *); 2003 extern void unregister_blkdev(unsigned int, const char *); 2004 extern struct block_device *bdget(dev_t); 2005 extern struct block_device *bdgrab(struct block_device *bdev); 2006 extern void bd_set_size(struct block_device *, loff_t size); 2007 extern void bd_forget(struct inode *inode); 2008 extern void bdput(struct block_device *); 2009 extern struct block_device *open_by_devnum(dev_t, fmode_t); 2010 extern void invalidate_bdev(struct block_device *); 2011 extern int sync_blockdev(struct block_device *bdev); 2012 extern struct super_block *freeze_bdev(struct block_device *); 2013 extern void emergency_thaw_all(void); 2014 extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); 2015 extern int fsync_bdev(struct block_device *); 2016 #else 2017 static inline void bd_forget(struct inode *inode) {} 2018 static inline int sync_blockdev(struct block_device *bdev) { return 0; } 2019 static inline void invalidate_bdev(struct block_device *bdev) {} 2020 2021 static inline struct super_block *freeze_bdev(struct block_device *sb) 2022 { 2023 return NULL; 2024 } 2025 2026 
static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) 2027 { 2028 return 0; 2029 } 2030 #endif 2031 extern int sync_filesystem(struct super_block *); 2032 extern const struct file_operations def_blk_fops; 2033 extern const struct file_operations def_chr_fops; 2034 extern const struct file_operations bad_sock_fops; 2035 extern const struct file_operations def_fifo_fops; 2036 #ifdef CONFIG_BLOCK 2037 extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); 2038 extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); 2039 extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); 2040 extern int blkdev_get(struct block_device *, fmode_t); 2041 extern int blkdev_put(struct block_device *, fmode_t); 2042 extern int bd_claim(struct block_device *, void *); 2043 extern void bd_release(struct block_device *); 2044 #ifdef CONFIG_SYSFS 2045 extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *); 2046 extern void bd_release_from_disk(struct block_device *, struct gendisk *); 2047 #else 2048 #define bd_claim_by_disk(bdev, holder, disk) bd_claim(bdev, holder) 2049 #define bd_release_from_disk(bdev, disk) bd_release(bdev) 2050 #endif 2051 #endif 2052 2053 /* fs/char_dev.c */ 2054 #define CHRDEV_MAJOR_HASH_SIZE 255 2055 extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); 2056 extern int register_chrdev_region(dev_t, unsigned, const char *); 2057 extern int __register_chrdev(unsigned int major, unsigned int baseminor, 2058 unsigned int count, const char *name, 2059 const struct file_operations *fops); 2060 extern void __unregister_chrdev(unsigned int major, unsigned int baseminor, 2061 unsigned int count, const char *name); 2062 extern void unregister_chrdev_region(dev_t, unsigned); 2063 extern void chrdev_show(struct seq_file *,off_t); 2064 2065 static inline int register_chrdev(unsigned int major, const char *name, 2066 const struct file_operations *fops) 2067 { 2068 return __register_chrdev(major, 0, 256, name, fops); 2069 } 2070 2071 static inline void unregister_chrdev(unsigned int major, const char *name) 2072 { 2073 __unregister_chrdev(major, 0, 256, name); 2074 } 2075 2076 /* fs/block_dev.c */ 2077 #define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ 2078 #define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ 2079 2080 #ifdef CONFIG_BLOCK 2081 #define BLKDEV_MAJOR_HASH_SIZE 255 2082 extern const char *__bdevname(dev_t, char *buffer); 2083 extern const char *bdevname(struct block_device *bdev, char *buffer); 2084 extern struct block_device *lookup_bdev(const char *); 2085 extern struct block_device *open_bdev_exclusive(const char *, fmode_t, void *); 2086 extern void close_bdev_exclusive(struct block_device *, fmode_t); 2087 extern void blkdev_show(struct seq_file *,off_t); 2088 2089 #else 2090 #define BLKDEV_MAJOR_HASH_SIZE 0 2091 #endif 2092 2093 extern void init_special_inode(struct inode *, umode_t, dev_t); 2094 2095 /* Invalid inode operations -- fs/bad_inode.c */ 2096 extern void make_bad_inode(struct inode *); 2097 extern int is_bad_inode(struct inode *); 2098 2099 extern const struct file_operations read_pipefifo_fops; 2100 extern const struct file_operations write_pipefifo_fops; 2101 extern const struct file_operations rdwr_pipefifo_fops; 2102 2103 extern int fs_may_remount_ro(struct super_block *); 2104 2105 #ifdef CONFIG_BLOCK 2106 /* 2107 * return READ, READA, or WRITE 2108 */ 2109 #define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | 
RWA_MASK)) 2110 2111 /* 2112 * return data direction, READ or WRITE 2113 */ 2114 #define bio_data_dir(bio) ((bio)->bi_rw & 1) 2115 2116 extern void check_disk_size_change(struct gendisk *disk, 2117 struct block_device *bdev); 2118 extern int revalidate_disk(struct gendisk *); 2119 extern int check_disk_change(struct block_device *); 2120 extern int __invalidate_device(struct block_device *); 2121 extern int invalidate_partition(struct gendisk *, int); 2122 #endif 2123 unsigned long invalidate_mapping_pages(struct address_space *mapping, 2124 pgoff_t start, pgoff_t end); 2125 2126 static inline void invalidate_remote_inode(struct inode *inode) 2127 { 2128 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 2129 S_ISLNK(inode->i_mode)) 2130 invalidate_mapping_pages(inode->i_mapping, 0, -1); 2131 } 2132 extern int invalidate_inode_pages2(struct address_space *mapping); 2133 extern int invalidate_inode_pages2_range(struct address_space *mapping, 2134 pgoff_t start, pgoff_t end); 2135 extern int write_inode_now(struct inode *, int); 2136 extern int filemap_fdatawrite(struct address_space *); 2137 extern int filemap_flush(struct address_space *); 2138 extern int filemap_fdatawait(struct address_space *); 2139 extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, 2140 loff_t lend); 2141 extern int filemap_write_and_wait(struct address_space *mapping); 2142 extern int filemap_write_and_wait_range(struct address_space *mapping, 2143 loff_t lstart, loff_t lend); 2144 extern int __filemap_fdatawrite_range(struct address_space *mapping, 2145 loff_t start, loff_t end, int sync_mode); 2146 extern int filemap_fdatawrite_range(struct address_space *mapping, 2147 loff_t start, loff_t end); 2148 2149 extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, 2150 int datasync); 2151 extern int vfs_fsync(struct file *file, int datasync); 2152 extern int generic_write_sync(struct file *file, loff_t pos, loff_t count); 2153 extern void sync_supers(void); 2154 extern void emergency_sync(void); 2155 extern void emergency_remount(void); 2156 #ifdef CONFIG_BLOCK 2157 extern sector_t bmap(struct inode *, sector_t); 2158 #endif 2159 extern int notify_change(struct dentry *, struct iattr *); 2160 extern int inode_permission(struct inode *, int); 2161 extern int generic_permission(struct inode *, int, 2162 int (*check_acl)(struct inode *, int)); 2163 2164 static inline bool execute_ok(struct inode *inode) 2165 { 2166 return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode); 2167 } 2168 2169 extern int get_write_access(struct inode *); 2170 extern int deny_write_access(struct file *); 2171 static inline void put_write_access(struct inode * inode) 2172 { 2173 atomic_dec(&inode->i_writecount); 2174 } 2175 static inline void allow_write_access(struct file *file) 2176 { 2177 if (file) 2178 atomic_inc(&file->f_path.dentry->d_inode->i_writecount); 2179 } 2180 extern int do_pipe_flags(int *, int); 2181 extern struct file *create_read_pipe(struct file *f, int flags); 2182 extern struct file *create_write_pipe(int flags); 2183 extern void free_write_pipe(struct file *); 2184 2185 extern struct file *do_filp_open(int dfd, const char *pathname, 2186 int open_flag, int mode, int acc_mode); 2187 extern int may_open(struct path *, int, int); 2188 2189 extern int kernel_read(struct file *, loff_t, char *, unsigned long); 2190 extern struct file * open_exec(const char *); 2191 2192 /* fs/dcache.c -- generic fs support functions */ 2193 extern int is_subdir(struct dentry *, struct dentry *); 2194 
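/* A minimal usage sketch (not part of this header; example_* names are
 * hypothetical) of the writeback helpers declared above: flush an
 * inode's dirty pages and wait for completion, as a simple ->fsync()
 * implementation might do before syncing its own metadata. */
#include <linux/fs.h>

static int example_flush_data(struct inode *inode)
{
	/* writes back the mapping's dirty pages, then waits on them */
	return filemap_write_and_wait(inode->i_mapping);
}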
extern int path_is_under(struct path *, struct path *); 2195 extern ino_t find_inode_number(struct dentry *, struct qstr *); 2196 2197 #include <linux/err.h> 2198 2199 /* needed for stackable file system support */ 2200 extern loff_t default_llseek(struct file *file, loff_t offset, int origin); 2201 2202 extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); 2203 2204 extern int inode_init_always(struct super_block *, struct inode *); 2205 extern void inode_init_once(struct inode *); 2206 extern void ihold(struct inode * inode); 2207 extern void iput(struct inode *); 2208 extern struct inode * igrab(struct inode *); 2209 extern ino_t iunique(struct super_block *, ino_t); 2210 extern int inode_needs_sync(struct inode *inode); 2211 extern int generic_delete_inode(struct inode *inode); 2212 extern int generic_drop_inode(struct inode *inode); 2213 2214 extern struct inode *ilookup5_nowait(struct super_block *sb, 2215 unsigned long hashval, int (*test)(struct inode *, void *), 2216 void *data); 2217 extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval, 2218 int (*test)(struct inode *, void *), void *data); 2219 extern struct inode *ilookup(struct super_block *sb, unsigned long ino); 2220 2221 extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); 2222 extern struct inode * iget_locked(struct super_block *, unsigned long); 2223 extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); 2224 extern int insert_inode_locked(struct inode *); 2225 extern void unlock_new_inode(struct inode *); 2226 extern unsigned int get_next_ino(void); 2227 2228 extern void __iget(struct inode * inode); 2229 extern void iget_failed(struct inode *); 2230 extern void end_writeback(struct inode *); 2231 extern void __destroy_inode(struct inode *); 2232 extern struct inode *new_inode(struct super_block *); 2233 extern int should_remove_suid(struct dentry *); 2234 extern int file_remove_suid(struct file *); 2235 2236 extern void __insert_inode_hash(struct inode *, unsigned long hashval); 2237 extern void remove_inode_hash(struct inode *); 2238 static inline void insert_inode_hash(struct inode *inode) 2239 { 2240 __insert_inode_hash(inode, inode->i_ino); 2241 } 2242 extern void inode_sb_list_add(struct inode *inode); 2243 2244 #ifdef CONFIG_BLOCK 2245 extern void submit_bio(int, struct bio *); 2246 extern int bdev_read_only(struct block_device *); 2247 #endif 2248 extern int set_blocksize(struct block_device *, int); 2249 extern int sb_set_blocksize(struct super_block *, int); 2250 extern int sb_min_blocksize(struct super_block *, int); 2251 2252 extern int generic_file_mmap(struct file *, struct vm_area_struct *); 2253 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); 2254 extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size); 2255 int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk); 2256 extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t); 2257 extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, 2258 loff_t *); 2259 extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t); 2260 extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *, 2261 unsigned long *, loff_t, 
loff_t *, size_t, size_t); 2262 extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *, 2263 unsigned long, loff_t, loff_t *, size_t, ssize_t); 2264 extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos); 2265 extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); 2266 extern int generic_segment_checks(const struct iovec *iov, 2267 unsigned long *nr_segs, size_t *count, int access_flags); 2268 2269 /* fs/block_dev.c */ 2270 extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, 2271 unsigned long nr_segs, loff_t pos); 2272 extern int blkdev_fsync(struct file *filp, int datasync); 2273 2274 /* fs/splice.c */ 2275 extern ssize_t generic_file_splice_read(struct file *, loff_t *, 2276 struct pipe_inode_info *, size_t, unsigned int); 2277 extern ssize_t default_file_splice_read(struct file *, loff_t *, 2278 struct pipe_inode_info *, size_t, unsigned int); 2279 extern ssize_t generic_file_splice_write(struct pipe_inode_info *, 2280 struct file *, loff_t *, size_t, unsigned int); 2281 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, 2282 struct file *out, loff_t *, size_t len, unsigned int flags); 2283 extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, 2284 size_t len, unsigned int flags); 2285 2286 extern void 2287 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); 2288 extern loff_t noop_llseek(struct file *file, loff_t offset, int origin); 2289 extern loff_t no_llseek(struct file *file, loff_t offset, int origin); 2290 extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin); 2291 extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset, 2292 int origin); 2293 extern int generic_file_open(struct inode * inode, struct file * filp); 2294 extern int nonseekable_open(struct inode * inode, struct file * filp); 2295 2296 #ifdef CONFIG_FS_XIP 2297 extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len, 2298 loff_t *ppos); 2299 extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma); 2300 extern ssize_t xip_file_write(struct file *filp, const char __user *buf, 2301 size_t len, loff_t *ppos); 2302 extern int xip_truncate_page(struct address_space *mapping, loff_t from); 2303 #else 2304 static inline int xip_truncate_page(struct address_space *mapping, loff_t from) 2305 { 2306 return 0; 2307 } 2308 #endif 2309 2310 #ifdef CONFIG_BLOCK 2311 typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, 2312 loff_t file_offset); 2313 2314 enum { 2315 /* need locking between buffered and direct access */ 2316 DIO_LOCKING = 0x01, 2317 2318 /* filesystem does not support filling holes */ 2319 DIO_SKIP_HOLES = 0x02, 2320 }; 2321 2322 void dio_end_io(struct bio *bio, int error); 2323 2324 ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, 2325 struct block_device *bdev, const struct iovec *iov, loff_t offset, 2326 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, 2327 dio_submit_t submit_io, int flags); 2328 2329 static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, 2330 struct inode *inode, struct block_device *bdev, const struct iovec *iov, 2331 loff_t offset, unsigned long nr_segs, get_block_t get_block, 2332 dio_iodone_t end_io) 2333 { 2334 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, 2335 nr_segs, get_block, end_io, NULL, 2336 DIO_LOCKING | 
DIO_SKIP_HOLES); 2337 } 2338 #endif 2339 2340 extern const struct file_operations generic_ro_fops; 2341 2342 #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m)) 2343 2344 extern int vfs_readlink(struct dentry *, char __user *, int, const char *); 2345 extern int vfs_follow_link(struct nameidata *, const char *); 2346 extern int page_readlink(struct dentry *, char __user *, int); 2347 extern void *page_follow_link_light(struct dentry *, struct nameidata *); 2348 extern void page_put_link(struct dentry *, struct nameidata *, void *); 2349 extern int __page_symlink(struct inode *inode, const char *symname, int len, 2350 int nofs); 2351 extern int page_symlink(struct inode *inode, const char *symname, int len); 2352 extern const struct inode_operations page_symlink_inode_operations; 2353 extern int generic_readlink(struct dentry *, char __user *, int); 2354 extern void generic_fillattr(struct inode *, struct kstat *); 2355 extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); 2356 void __inode_add_bytes(struct inode *inode, loff_t bytes); 2357 void inode_add_bytes(struct inode *inode, loff_t bytes); 2358 void inode_sub_bytes(struct inode *inode, loff_t bytes); 2359 loff_t inode_get_bytes(struct inode *inode); 2360 void inode_set_bytes(struct inode *inode, loff_t bytes); 2361 2362 extern int vfs_readdir(struct file *, filldir_t, void *); 2363 2364 extern int vfs_stat(const char __user *, struct kstat *); 2365 extern int vfs_lstat(const char __user *, struct kstat *); 2366 extern int vfs_fstat(unsigned int, struct kstat *); 2367 extern int vfs_fstatat(int , const char __user *, struct kstat *, int); 2368 2369 extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, 2370 unsigned long arg); 2371 extern int __generic_block_fiemap(struct inode *inode, 2372 struct fiemap_extent_info *fieinfo, 2373 loff_t start, loff_t len, 2374 get_block_t *get_block); 2375 extern int generic_block_fiemap(struct inode *inode, 2376 struct fiemap_extent_info *fieinfo, u64 start, 2377 u64 len, get_block_t *get_block); 2378 2379 extern void get_filesystem(struct file_system_type *fs); 2380 extern void put_filesystem(struct file_system_type *fs); 2381 extern struct file_system_type *get_fs_type(const char *name); 2382 extern struct super_block *get_super(struct block_device *); 2383 extern struct super_block *get_active_super(struct block_device *bdev); 2384 extern struct super_block *user_get_super(dev_t); 2385 extern void drop_super(struct super_block *sb); 2386 extern void iterate_supers(void (*)(struct super_block *, void *), void *); 2387 2388 extern int dcache_dir_open(struct inode *, struct file *); 2389 extern int dcache_dir_close(struct inode *, struct file *); 2390 extern loff_t dcache_dir_lseek(struct file *, loff_t, int); 2391 extern int dcache_readdir(struct file *, void *, filldir_t); 2392 extern int simple_setattr(struct dentry *, struct iattr *); 2393 extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); 2394 extern int simple_statfs(struct dentry *, struct kstatfs *); 2395 extern int simple_link(struct dentry *, struct inode *, struct dentry *); 2396 extern int simple_unlink(struct inode *, struct dentry *); 2397 extern int simple_rmdir(struct inode *, struct dentry *); 2398 extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); 2399 extern int noop_fsync(struct file *, int); 2400 extern int simple_empty(struct dentry *); 2401 extern int simple_readpage(struct file *file, struct 
page *page); 2402 extern int simple_write_begin(struct file *file, struct address_space *mapping, 2403 loff_t pos, unsigned len, unsigned flags, 2404 struct page **pagep, void **fsdata); 2405 extern int simple_write_end(struct file *file, struct address_space *mapping, 2406 loff_t pos, unsigned len, unsigned copied, 2407 struct page *page, void *fsdata); 2408 2409 extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *); 2410 extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); 2411 extern const struct file_operations simple_dir_operations; 2412 extern const struct inode_operations simple_dir_inode_operations; 2413 struct tree_descr { char *name; const struct file_operations *ops; int mode; }; 2414 struct dentry *d_alloc_name(struct dentry *, const char *); 2415 extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *); 2416 extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); 2417 extern void simple_release_fs(struct vfsmount **mount, int *count); 2418 2419 extern ssize_t simple_read_from_buffer(void __user *to, size_t count, 2420 loff_t *ppos, const void *from, size_t available); 2421 extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, 2422 const void __user *from, size_t count); 2423 2424 extern int generic_file_fsync(struct file *, int); 2425 2426 extern int generic_check_addressable(unsigned, u64); 2427 2428 #ifdef CONFIG_MIGRATION 2429 extern int buffer_migrate_page(struct address_space *, 2430 struct page *, struct page *); 2431 #else 2432 #define buffer_migrate_page NULL 2433 #endif 2434 2435 extern int inode_change_ok(const struct inode *, struct iattr *); 2436 extern int inode_newsize_ok(const struct inode *, loff_t offset); 2437 extern void setattr_copy(struct inode *inode, const struct iattr *attr); 2438 2439 extern void file_update_time(struct file *file); 2440 2441 extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt); 2442 extern void save_mount_options(struct super_block *sb, char *options); 2443 extern void replace_mount_options(struct super_block *sb, char *options); 2444 2445 static inline ino_t parent_ino(struct dentry *dentry) 2446 { 2447 ino_t res; 2448 2449 spin_lock(&dentry->d_lock); 2450 res = dentry->d_parent->d_inode->i_ino; 2451 spin_unlock(&dentry->d_lock); 2452 return res; 2453 } 2454 2455 /* Transaction based IO helpers */ 2456 2457 /* 2458 * An argresp is stored in an allocated page and holds the 2459 * size of the argument or response, along with its content 2460 */ 2461 struct simple_transaction_argresp { 2462 ssize_t size; 2463 char data[0]; 2464 }; 2465 2466 #define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp)) 2467 2468 char *simple_transaction_get(struct file *file, const char __user *buf, 2469 size_t size); 2470 ssize_t simple_transaction_read(struct file *file, char __user *buf, 2471 size_t size, loff_t *pos); 2472 int simple_transaction_release(struct inode *inode, struct file *file); 2473 2474 void simple_transaction_set(struct file *file, size_t n); 2475 2476 /* 2477 * simple attribute files 2478 * 2479 * These attributes behave similar to those in sysfs: 2480 * 2481 * Writing to an attribute immediately sets a value, an open file can be 2482 * written to multiple times. 2483 * 2484 * Reading from an attribute creates a buffer from the value that might get 2485 * read with multiple read calls. 
When the attribute has been read 2486 * completely, no further read calls are possible until the file is opened 2487 * again. 2488 * 2489 * All attributes contain a text representation of a numeric value 2490 * that is accessed with the get() and set() functions. 2491 */ 2492 #define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ 2493 static int __fops ## _open(struct inode *inode, struct file *file) \ 2494 { \ 2495 __simple_attr_check_format(__fmt, 0ull); \ 2496 return simple_attr_open(inode, file, __get, __set, __fmt); \ 2497 } \ 2498 static const struct file_operations __fops = { \ 2499 .owner = THIS_MODULE, \ 2500 .open = __fops ## _open, \ 2501 .release = simple_attr_release, \ 2502 .read = simple_attr_read, \ 2503 .write = simple_attr_write, \ 2504 .llseek = generic_file_llseek, \ 2505 }; 2506 2507 static inline void __attribute__((format(printf, 1, 2))) 2508 __simple_attr_check_format(const char *fmt, ...) 2509 { 2510 /* don't do anything, just let the compiler check the arguments; */ 2511 } 2512 2513 int simple_attr_open(struct inode *inode, struct file *file, 2514 int (*get)(void *, u64 *), int (*set)(void *, u64), 2515 const char *fmt); 2516 int simple_attr_release(struct inode *inode, struct file *file); 2517 ssize_t simple_attr_read(struct file *file, char __user *buf, 2518 size_t len, loff_t *ppos); 2519 ssize_t simple_attr_write(struct file *file, const char __user *buf, 2520 size_t len, loff_t *ppos); 2521 2522 struct ctl_table; 2523 int proc_nr_files(struct ctl_table *table, int write, 2524 void __user *buffer, size_t *lenp, loff_t *ppos); 2525 int proc_nr_dentry(struct ctl_table *table, int write, 2526 void __user *buffer, size_t *lenp, loff_t *ppos); 2527 int proc_nr_inodes(struct ctl_table *table, int write, 2528 void __user *buffer, size_t *lenp, loff_t *ppos); 2529 int __init get_filesystem_list(char *buf); 2530 2531 #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE]) 2532 #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \ 2533 (flag & FMODE_NONOTIFY))) 2534 2535 #endif /* __KERNEL__ */ 2536 #endif /* _LINUX_FS_H */
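/* Usage sketches for the fs.h interfaces listed above. None of this
 * code comes from the kernel sources; all example_* and examplefs
 * identifiers are hypothetical. First, the i_nlink helpers: a
 * filesystem creating a hard link bumps the count through inc_nlink()
 * and records the change for writeback, which is exactly what
 * inode_inc_link_count() bundles. */
#include <linux/fs.h>

static void example_note_new_link(struct inode *inode)
{
	inc_nlink(inode);		/* i_nlink++ */
	mark_inode_dirty(inode);	/* __mark_inode_dirty(inode, I_DIRTY) */
}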
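/* A sketch of filp_open()/filp_close(). Opening files from kernel code
 * is rarely appropriate in drivers, but the API is as declared above
 * (example_probe_file is hypothetical). */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/err.h>

static int example_probe_file(const char *path)
{
	struct file *filp = filp_open(path, O_RDONLY, 0);

	if (IS_ERR(filp))
		return PTR_ERR(filp);	/* e.g. -ENOENT */
	filp_close(filp, NULL);
	return 0;
}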
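/* Declaring a trivial filesystem with the 2.6.37-era ->mount()/kill_sb
 * interface (a sketch; the fill_super body is left empty). */
#include <linux/fs.h>
#include <linux/module.h>

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* would set sb->s_op and allocate the root dentry here */
	return 0;
}

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
				      int flags, const char *dev_name,
				      void *data)
{
	return mount_nodev(fs_type, flags, data, examplefs_fill_super);
}

static struct file_system_type examplefs_type = {
	.owner   = THIS_MODULE,
	.name    = "examplefs",
	.mount   = examplefs_mount,
	.kill_sb = kill_anon_super,
};

/* registered from module init with register_filesystem(&examplefs_type)
 * and torn down with unregister_filesystem(&examplefs_type) */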
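/* Registering a character device the classic way; a major of 0 asks
 * __register_chrdev() to allocate one dynamically, and register_chrdev()
 * claims all 256 minors of that major (sketch; example_fops is empty). */
#include <linux/fs.h>
#include <linux/module.h>

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static int example_major;

static int __init example_chrdev_init(void)
{
	example_major = register_chrdev(0, "example", &example_fops);
	return example_major < 0 ? example_major : 0;
}

static void __exit example_chrdev_exit(void)
{
	unregister_chrdev(example_major, "example");
}

module_init(example_chrdev_init);
module_exit(example_chrdev_exit);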
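/* The usual lookup-or-create pattern for iget_locked(): a cached inode
 * comes back ready to use, while a fresh one has I_NEW set and must be
 * filled in and then published with unlock_new_inode() (sketch; the
 * on-disk read is elided). */
#include <linux/fs.h>
#include <linux/err.h>

static struct inode *example_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* found in the inode cache */

	/* ... fill i_mode, i_size, i_op, ... from disk here ... */
	unlock_new_inode(inode);	/* clears I_NEW, wakes waiters */
	return inode;
}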
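/* simple_read_from_buffer() implements a complete ->read() for data
 * that already sits in a kernel buffer, taking care of *ppos clamping
 * and the copy_to_user() (sketch). */
#include <linux/fs.h>

static const char example_msg[] = "hello\n";

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(buf, count, ppos,
				       example_msg, sizeof(example_msg) - 1);
}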
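/* DEFINE_SIMPLE_ATTRIBUTE generates the _open wrapper and the
 * file_operations around a numeric get/set pair; the resulting
 * example_attr_fops can then be handed to a file creator such as
 * debugfs_create_file() (sketch). */
#include <linux/fs.h>
#include <linux/module.h>

static u64 example_value;

static int example_get(void *data, u64 *val)
{
	*val = example_value;
	return 0;
}

static int example_set(void *data, u64 val)
{
	example_value = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(example_attr_fops, example_get, example_set, "%llu\n");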
1 /* 2 * pci.h 3 * 4 * PCI defines and function prototypes 5 * Copyright 1994, Drew Eckhardt 6 * Copyright 1997--1999 Martin Mares <mj@ucw.cz> 7 * 8 * For more information, please consult the following manuals (look at 9 * http://www.pcisig.com/ for how to get them): 10 * 11 * PCI BIOS Specification 12 * PCI Local Bus Specification 13 * PCI to PCI Bridge Specification 14 * PCI System Design Guide 15 */ 16 17 #ifndef LINUX_PCI_H 18 #define LINUX_PCI_H 19 20 #include <linux/pci_regs.h> /* The pci register defines */ 21 22 /* 23 * The PCI interface treats multi-function devices as independent 24 * devices. The slot/function address of each device is encoded 25 * in a single byte as follows: 26 * 27 * 7:3 = slot 28 * 2:0 = function 29 */ 30 #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) 31 #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) 32 #define PCI_FUNC(devfn) ((devfn) & 0x07) 33 34 /* Ioctls for /proc/bus/pci/X/Y nodes. */ 35 #define PCIIOC_BASE ('P' << 24 | 'C' << 16 | 'I' << 8) 36 #define PCIIOC_CONTROLLER (PCIIOC_BASE | 0x00) /* Get controller for PCI device. */ 37 #define PCIIOC_MMAP_IS_IO (PCIIOC_BASE | 0x01) /* Set mmap state to I/O space. */ 38 #define PCIIOC_MMAP_IS_MEM (PCIIOC_BASE | 0x02) /* Set mmap state to MEM space. */ 39 #define PCIIOC_WRITE_COMBINE (PCIIOC_BASE | 0x03) /* Enable/disable write-combining. */ 40 41 #ifdef __KERNEL__ 42 43 #include <linux/mod_devicetable.h> 44 45 #include <linux/types.h> 46 #include <linux/init.h> 47 #include <linux/ioport.h> 48 #include <linux/list.h> 49 #include <linux/compiler.h> 50 #include <linux/errno.h> 51 #include <linux/kobject.h> 52 #include <asm/atomic.h> 53 #include <linux/device.h> 54 #include <linux/io.h> 55 #include <linux/irqreturn.h> 56 57 /* Include the ID list */ 58 #include <linux/pci_ids.h> 59 60 /* pci_slot represents a physical slot */ 61 struct pci_slot { 62 struct pci_bus *bus; /* The bus this slot is on */ 63 struct list_head list; /* node in list of slots on this bus */ 64 struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */ 65 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */ 66 struct kobject kobj; 67 }; 68 69 static inline const char *pci_slot_name(const struct pci_slot *slot) 70 { 71 return kobject_name(&slot->kobj); 72 } 73 74 /* File state for mmap()s on /proc/bus/pci/X/Y */ 75 enum pci_mmap_state { 76 pci_mmap_io, 77 pci_mmap_mem 78 }; 79 80 /* This defines the direction arg to the DMA mapping routines. 
*/ 81 #define PCI_DMA_BIDIRECTIONAL 0 82 #define PCI_DMA_TODEVICE 1 83 #define PCI_DMA_FROMDEVICE 2 84 #define PCI_DMA_NONE 3 85 86 /* 87 * For PCI devices, the region numbers are assigned this way: 88 */ 89 enum { 90 /* #0-5: standard PCI resources */ 91 PCI_STD_RESOURCES, 92 PCI_STD_RESOURCE_END = 5, 93 94 /* #6: expansion ROM resource */ 95 PCI_ROM_RESOURCE, 96 97 /* device specific resources */ 98 #ifdef CONFIG_PCI_IOV 99 PCI_IOV_RESOURCES, 100 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1, 101 #endif 102 103 /* resources assigned to buses behind the bridge */ 104 #define PCI_BRIDGE_RESOURCE_NUM 4 105 106 PCI_BRIDGE_RESOURCES, 107 PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES + 108 PCI_BRIDGE_RESOURCE_NUM - 1, 109 110 /* total resources associated with a PCI device */ 111 PCI_NUM_RESOURCES, 112 113 /* preserve this for compatibility */ 114 DEVICE_COUNT_RESOURCE 115 }; 116 117 typedef int __bitwise pci_power_t; 118 119 #define PCI_D0 ((pci_power_t __force) 0) 120 #define PCI_D1 ((pci_power_t __force) 1) 121 #define PCI_D2 ((pci_power_t __force) 2) 122 #define PCI_D3hot ((pci_power_t __force) 3) 123 #define PCI_D3cold ((pci_power_t __force) 4) 124 #define PCI_UNKNOWN ((pci_power_t __force) 5) 125 #define PCI_POWER_ERROR ((pci_power_t __force) -1) 126 127 /* Remember to update this when the list above changes! */ 128 extern const char *pci_power_names[]; 129 130 static inline const char *pci_power_name(pci_power_t state) 131 { 132 return pci_power_names[1 + (int) state]; 133 } 134 135 #define PCI_PM_D2_DELAY 200 136 #define PCI_PM_D3_WAIT 10 137 #define PCI_PM_BUS_WAIT 50 138 139 /** The pci_channel state describes connectivity between the CPU and 140 * the pci device. If some PCI bus between here and the pci device 141 * has crashed or locked up, this info is reflected here. 142 */ 143 typedef unsigned int __bitwise pci_channel_state_t; 144 145 enum pci_channel_state { 146 /* I/O channel is in normal state */ 147 pci_channel_io_normal = (__force pci_channel_state_t) 1, 148 149 /* I/O to channel is blocked */ 150 pci_channel_io_frozen = (__force pci_channel_state_t) 2, 151 152 /* PCI card is dead */ 153 pci_channel_io_perm_failure = (__force pci_channel_state_t) 3, 154 }; 155 156 typedef unsigned int __bitwise pcie_reset_state_t; 157 158 enum pcie_reset_state { 159 /* Reset is NOT asserted (Use to deassert reset) */ 160 pcie_deassert_reset = (__force pcie_reset_state_t) 1, 161 162 /* Use #PERST to reset PCI-E device */ 163 pcie_warm_reset = (__force pcie_reset_state_t) 2, 164 165 /* Use PCI-E Hot Reset to reset device */ 166 pcie_hot_reset = (__force pcie_reset_state_t) 3 167 }; 168 169 typedef unsigned short __bitwise pci_dev_flags_t; 170 enum pci_dev_flags { 171 /* INTX_DISABLE in PCI_COMMAND register disables MSI 172 * generation too. 
173 */ 174 PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1, 175 /* Device configuration is irrevocably lost if disabled into D3 */ 176 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, 177 }; 178 179 enum pci_irq_reroute_variant { 180 INTEL_IRQ_REROUTE_VARIANT = 1, 181 MAX_IRQ_REROUTE_VARIANTS = 3 182 }; 183 184 typedef unsigned short __bitwise pci_bus_flags_t; 185 enum pci_bus_flags { 186 PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1, 187 PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2, 188 }; 189 190 /* Based on the PCI Hotplug Spec, but some values are made up by us */ 191 enum pci_bus_speed { 192 PCI_SPEED_33MHz = 0x00, 193 PCI_SPEED_66MHz = 0x01, 194 PCI_SPEED_66MHz_PCIX = 0x02, 195 PCI_SPEED_100MHz_PCIX = 0x03, 196 PCI_SPEED_133MHz_PCIX = 0x04, 197 PCI_SPEED_66MHz_PCIX_ECC = 0x05, 198 PCI_SPEED_100MHz_PCIX_ECC = 0x06, 199 PCI_SPEED_133MHz_PCIX_ECC = 0x07, 200 PCI_SPEED_66MHz_PCIX_266 = 0x09, 201 PCI_SPEED_100MHz_PCIX_266 = 0x0a, 202 PCI_SPEED_133MHz_PCIX_266 = 0x0b, 203 AGP_UNKNOWN = 0x0c, 204 AGP_1X = 0x0d, 205 AGP_2X = 0x0e, 206 AGP_4X = 0x0f, 207 AGP_8X = 0x10, 208 PCI_SPEED_66MHz_PCIX_533 = 0x11, 209 PCI_SPEED_100MHz_PCIX_533 = 0x12, 210 PCI_SPEED_133MHz_PCIX_533 = 0x13, 211 PCIE_SPEED_2_5GT = 0x14, 212 PCIE_SPEED_5_0GT = 0x15, 213 PCIE_SPEED_8_0GT = 0x16, 214 PCI_SPEED_UNKNOWN = 0xff, 215 }; 216 217 struct pci_cap_saved_state { 218 struct hlist_node next; 219 char cap_nr; 220 u32 data[0]; 221 }; 222 223 struct pcie_link_state; 224 struct pci_vpd; 225 struct pci_sriov; 226 struct pci_ats; 227 228 /* 229 * The pci_dev structure is used to describe PCI devices. 230 */ 231 struct pci_dev { 232 struct list_head bus_list; /* node in per-bus list */ 233 struct pci_bus *bus; /* bus this device is on */ 234 struct pci_bus *subordinate; /* bus this device bridges to */ 235 236 void *sysdata; /* hook for sys-specific extension */ 237 struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ 238 struct pci_slot *slot; /* Physical slot this device is in */ 239 240 unsigned int devfn; /* encoded device & function index */ 241 unsigned short vendor; 242 unsigned short device; 243 unsigned short subsystem_vendor; 244 unsigned short subsystem_device; 245 unsigned int class; /* 3 bytes: (base,sub,prog-if) */ 246 u8 revision; /* PCI revision, low byte of class word */ 247 u8 hdr_type; /* PCI header type (`multi' flag masked out) */ 248 u8 pcie_cap; /* PCI-E capability offset */ 249 u8 pcie_type; /* PCI-E device/port type */ 250 u8 rom_base_reg; /* which config register controls the ROM */ 251 u8 pin; /* which interrupt pin this device uses */ 252 253 struct pci_driver *driver; /* which driver has allocated this device */ 254 u64 dma_mask; /* Mask of the bits of bus address this 255 device implements. Normally this is 256 0xffffffff. You only need to change 257 this if your device has broken DMA 258 or supports 64-bit transfers. */ 259 260 struct device_dma_parameters dma_parms; 261 262 pci_power_t current_state; /* Current operating state. In ACPI-speak, 263 this is D0-D3, D0 being fully functional, 264 and D3 being off. 
*/ 265 int pm_cap; /* PM capability offset in the 266 configuration space */ 267 unsigned int pme_support:5; /* Bitmask of states from which PME# 268 can be generated */ 269 unsigned int pme_interrupt:1; 270 unsigned int d1_support:1; /* Low power state D1 is supported */ 271 unsigned int d2_support:1; /* Low power state D2 is supported */ 272 unsigned int no_d1d2:1; /* Only allow D0 and D3 */ 273 unsigned int mmio_always_on:1; /* disallow turning off io/mem 274 decoding during bar sizing */ 275 unsigned int wakeup_prepared:1; 276 unsigned int d3_delay; /* D3->D0 transition time in ms */ 277 278 #ifdef CONFIG_PCIEASPM 279 struct pcie_link_state *link_state; /* ASPM link state. */ 280 #endif 281 282 pci_channel_state_t error_state; /* current connectivity state */ 283 struct device dev; /* Generic device interface */ 284 285 int cfg_size; /* Size of configuration space */ 286 287 /* 288 * Instead of touching interrupt line and base address registers 289 * directly, use the values stored here. They might be different! 290 */ 291 unsigned int irq; 292 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ 293 resource_size_t fw_addr[DEVICE_COUNT_RESOURCE]; /* FW-assigned addr */ 294 295 /* These fields are used by common fixups */ 296 unsigned int transparent:1; /* Transparent PCI bridge */ 297 unsigned int multifunction:1;/* Part of multi-function device */ 298 /* keep track of device state */ 299 unsigned int is_added:1; 300 unsigned int is_busmaster:1; /* device is busmaster */ 301 unsigned int no_msi:1; /* device may not use msi */ 302 unsigned int block_ucfg_access:1; /* userspace config space access is blocked */ 303 unsigned int broken_parity_status:1; /* Device generates false positive parity */ 304 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ 305 unsigned int msi_enabled:1; 306 unsigned int msix_enabled:1; 307 unsigned int ari_enabled:1; /* ARI forwarding */ 308 unsigned int is_managed:1; 309 unsigned int is_pcie:1; /* Obsolete. Will be removed. 310 Use pci_is_pcie() instead */ 311 unsigned int needs_freset:1; /* Dev requires fundamental reset */ 312 unsigned int state_saved:1; 313 unsigned int is_physfn:1; 314 unsigned int is_virtfn:1; 315 unsigned int reset_fn:1; 316 unsigned int is_hotplug_bridge:1; 317 unsigned int __aer_firmware_first_valid:1; 318 unsigned int __aer_firmware_first:1; 319 pci_dev_flags_t dev_flags; 320 atomic_t enable_cnt; /* pci_enable_device has been called */ 321 322 u32 saved_config_space[16]; /* config space saved at suspend time */ 323 struct hlist_head saved_cap_space; 324 struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */ 325 int rom_attr_enabled; /* has display of the rom attribute been enabled? 
*/ 326 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ 327 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ 328 #ifdef CONFIG_PCI_MSI 329 struct list_head msi_list; 330 #endif 331 struct pci_vpd *vpd; 332 #ifdef CONFIG_PCI_IOV 333 union { 334 struct pci_sriov *sriov; /* SR-IOV capability related */ 335 struct pci_dev *physfn; /* the PF this VF is associated with */ 336 }; 337 struct pci_ats *ats; /* Address Translation Service */ 338 #endif 339 }; 340 341 static inline struct pci_dev *pci_physfn(struct pci_dev *dev) 342 { 343 #ifdef CONFIG_PCI_IOV 344 if (dev->is_virtfn) 345 dev = dev->physfn; 346 #endif 347 348 return dev; 349 } 350 351 extern struct pci_dev *alloc_pci_dev(void); 352 353 #define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list) 354 #define to_pci_dev(n) container_of(n, struct pci_dev, dev) 355 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL) 356 357 static inline int pci_channel_offline(struct pci_dev *pdev) 358 { 359 return (pdev->error_state != pci_channel_io_normal); 360 } 361 362 static inline struct pci_cap_saved_state *pci_find_saved_cap( 363 struct pci_dev *pci_dev, char cap) 364 { 365 struct pci_cap_saved_state *tmp; 366 struct hlist_node *pos; 367 368 hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) { 369 if (tmp->cap_nr == cap) 370 return tmp; 371 } 372 return NULL; 373 } 374 375 static inline void pci_add_saved_cap(struct pci_dev *pci_dev, 376 struct pci_cap_saved_state *new_cap) 377 { 378 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); 379 } 380 381 /* 382 * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond 383 * to P2P or CardBus bridge windows) go in a table. Additional ones (for 384 * buses below host bridges or subtractive decode bridges) go in the list. 385 * Use pci_bus_for_each_resource() to iterate through all the resources. 386 */ 387 388 /* 389 * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly 390 * and there's no way to program the bridge with the details of the window. 391 * This does not apply to ACPI _CRS windows, even with the _DEC subtractive- 392 * decode bit set, because they are explicit and can be programmed with _SRS. 
393 */ 394 #define PCI_SUBTRACTIVE_DECODE 0x1 395 396 struct pci_bus_resource { 397 struct list_head list; 398 struct resource *res; 399 unsigned int flags; 400 }; 401 402 #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ 403 404 struct pci_bus { 405 struct list_head node; /* node in list of buses */ 406 struct pci_bus *parent; /* parent bus this bridge is on */ 407 struct list_head children; /* list of child buses */ 408 struct list_head devices; /* list of devices on this bus */ 409 struct pci_dev *self; /* bridge device as seen by parent */ 410 struct list_head slots; /* list of slots on this bus */ 411 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM]; 412 struct list_head resources; /* address space routed to this bus */ 413 414 struct pci_ops *ops; /* configuration access functions */ 415 void *sysdata; /* hook for sys-specific extension */ 416 struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ 417 418 unsigned char number; /* bus number */ 419 unsigned char primary; /* number of primary bridge */ 420 unsigned char secondary; /* number of secondary bridge */ 421 unsigned char subordinate; /* max number of subordinate buses */ 422 unsigned char max_bus_speed; /* enum pci_bus_speed */ 423 unsigned char cur_bus_speed; /* enum pci_bus_speed */ 424 425 char name[48]; 426 427 unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ 428 pci_bus_flags_t bus_flags; /* Inherited by child busses */ 429 struct device *bridge; 430 struct device dev; 431 struct bin_attribute *legacy_io; /* legacy I/O for this bus */ 432 struct bin_attribute *legacy_mem; /* legacy mem */ 433 unsigned int is_added:1; 434 }; 435 436 #define pci_bus_b(n) list_entry(n, struct pci_bus, node) 437 #define to_pci_bus(n) container_of(n, struct pci_bus, dev) 438 439 /* 440 * Returns true if the pci bus is root (behind host-pci bridge), 441 * false otherwise 442 */ 443 static inline bool pci_is_root_bus(struct pci_bus *pbus) 444 { 445 return !(pbus->parent); 446 } 447 448 #ifdef CONFIG_PCI_MSI 449 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) 450 { 451 return pci_dev->msi_enabled || pci_dev->msix_enabled; 452 } 453 #else 454 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; } 455 #endif 456 457 /* 458 * Error values that may be returned by PCI functions. 459 */ 460 #define PCIBIOS_SUCCESSFUL 0x00 461 #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 462 #define PCIBIOS_BAD_VENDOR_ID 0x83 463 #define PCIBIOS_DEVICE_NOT_FOUND 0x86 464 #define PCIBIOS_BAD_REGISTER_NUMBER 0x87 465 #define PCIBIOS_SET_FAILED 0x88 466 #define PCIBIOS_BUFFER_TOO_SMALL 0x89 467 468 /* Low-level architecture-dependent routines */ 469 470 struct pci_ops { 471 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); 472 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); 473 }; 474 475 /* 476 * ACPI needs to be able to access PCI config space before we've done a 477 * PCI bus scan and created pci_bus structures. 
478 */ 479 extern int raw_pci_read(unsigned int domain, unsigned int bus, 480 unsigned int devfn, int reg, int len, u32 *val); 481 extern int raw_pci_write(unsigned int domain, unsigned int bus, 482 unsigned int devfn, int reg, int len, u32 val); 483 484 struct pci_bus_region { 485 resource_size_t start; 486 resource_size_t end; 487 }; 488 489 struct pci_dynids { 490 spinlock_t lock; /* protects list, index */ 491 struct list_head list; /* for IDs added at runtime */ 492 }; 493 494 /* ---------------------------------------------------------------- */ 495 /** PCI Error Recovery System (PCI-ERS). If a PCI device driver provides 496 * a set of callbacks in struct pci_error_handlers, then that device driver 497 * will be notified of PCI bus errors, and will be driven to recovery 498 * when an error occurs. 499 */ 500 501 typedef unsigned int __bitwise pci_ers_result_t; 502 503 enum pci_ers_result { 504 /* no result/none/not supported in device driver */ 505 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1, 506 507 /* Device driver can recover without slot reset */ 508 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2, 509 510 /* Device driver wants slot to be reset. */ 511 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3, 512 513 /* Device has completely failed, is unrecoverable */ 514 PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4, 515 516 /* Device driver is fully recovered and operational */ 517 PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5, 518 }; 519 520 /* PCI bus error event callbacks */ 521 struct pci_error_handlers { 522 /* PCI bus error detected on this device */ 523 pci_ers_result_t (*error_detected)(struct pci_dev *dev, 524 enum pci_channel_state error); 525 526 /* MMIO has been re-enabled, but not DMA */ 527 pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); 528 529 /* PCI Express link has been reset */ 530 pci_ers_result_t (*link_reset)(struct pci_dev *dev); 531 532 /* PCI slot has been reset */ 533 pci_ers_result_t (*slot_reset)(struct pci_dev *dev); 534 535 /* Device driver may resume normal operations */ 536 void (*resume)(struct pci_dev *dev); 537 }; 538 539 /* ---------------------------------------------------------------- */ 540 541 struct module; 542 struct pci_driver { 543 struct list_head node; 544 const char *name; 545 const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */ 546 int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ 547 void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ 548 int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ 549 int (*suspend_late) (struct pci_dev *dev, pm_message_t state); 550 int (*resume_early) (struct pci_dev *dev); 551 int (*resume) (struct pci_dev *dev); /* Device woken up */ 552 void (*shutdown) (struct pci_dev *dev); 553 struct pci_error_handlers *err_handler; 554 struct device_driver driver; 555 struct pci_dynids dynids; 556 }; 557 558 #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) 559 560 /** 561 * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table 562 * @_table: device table name 563 * 564 * This macro is used to create a struct pci_device_id array (a device table) 565 * in a generic manner. 
566 */ 567 #define DEFINE_PCI_DEVICE_TABLE(_table) \ 568 const struct pci_device_id _table[] __devinitconst 569 570 /** 571 * PCI_DEVICE - macro used to describe a specific pci device 572 * @vend: the 16 bit PCI Vendor ID 573 * @dev: the 16 bit PCI Device ID 574 * 575 * This macro is used to create a struct pci_device_id that matches a 576 * specific device. The subvendor and subdevice fields will be set to 577 * PCI_ANY_ID. 578 */ 579 #define PCI_DEVICE(vend,dev) \ 580 .vendor = (vend), .device = (dev), \ 581 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID 582 583 /** 584 * PCI_DEVICE_CLASS - macro used to describe a specific pci device class 585 * @dev_class: the class, subclass, prog-if triple for this device 586 * @dev_class_mask: the class mask for this device 587 * 588 * This macro is used to create a struct pci_device_id that matches a 589 * specific PCI class. The vendor, device, subvendor, and subdevice 590 * fields will be set to PCI_ANY_ID. 591 */ 592 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \ 593 .class = (dev_class), .class_mask = (dev_class_mask), \ 594 .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \ 595 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID 596 597 /** 598 * PCI_VDEVICE - macro used to describe a specific pci device in short form 599 * @vendor: the vendor name 600 * @device: the 16 bit PCI Device ID 601 * 602 * This macro is used to create a struct pci_device_id that matches a 603 * specific PCI device. The subvendor, and subdevice fields will be set 604 * to PCI_ANY_ID. The macro allows the next field to follow as the device 605 * private data. 606 */ 607 608 #define PCI_VDEVICE(vendor, device) \ 609 PCI_VENDOR_ID_##vendor, (device), \ 610 PCI_ANY_ID, PCI_ANY_ID, 0, 0 611 612 /* these external functions are only available when PCI support is enabled */ 613 #ifdef CONFIG_PCI 614 615 extern struct bus_type pci_bus_type; 616 617 /* Do NOT directly access these two variables, unless you are arch specific pci 618 * code, or pci core code. 
*/ 619 extern struct list_head pci_root_buses; /* list of all known PCI buses */ 620 /* Some device drivers need to know if PCI is initialized */ 621 extern int no_pci_devices(void); 622 623 void pcibios_fixup_bus(struct pci_bus *); 624 int __must_check pcibios_enable_device(struct pci_dev *, int mask); 625 char *pcibios_setup(char *str); 626 627 /* Used only when drivers/pci/setup.c is used */ 628 resource_size_t pcibios_align_resource(void *, const struct resource *, 629 resource_size_t, 630 resource_size_t); 631 void pcibios_update_irq(struct pci_dev *, int irq); 632 633 /* Weak but can be overridden by arch */ 634 void pci_fixup_cardbus(struct pci_bus *); 635 636 /* Generic PCI functions used internally */ 637 638 void pcibios_scan_specific_bus(int busn); 639 extern struct pci_bus *pci_find_bus(int domain, int busnr); 640 void pci_bus_add_devices(const struct pci_bus *bus); 641 struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, 642 struct pci_ops *ops, void *sysdata); 643 static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, 644 void *sysdata) 645 { 646 struct pci_bus *root_bus; 647 root_bus = pci_scan_bus_parented(NULL, bus, ops, sysdata); 648 if (root_bus) 649 pci_bus_add_devices(root_bus); 650 return root_bus; 651 } 652 struct pci_bus *pci_create_bus(struct device *parent, int bus, 653 struct pci_ops *ops, void *sysdata); 654 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, 655 int busnr); 656 void pcie_update_link_speed(struct pci_bus *bus, u16 link_status); 657 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, 658 const char *name, 659 struct hotplug_slot *hotplug); 660 void pci_destroy_slot(struct pci_slot *slot); 661 void pci_renumber_slot(struct pci_slot *slot, int slot_nr); 662 int pci_scan_slot(struct pci_bus *bus, int devfn); 663 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); 664 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); 665 unsigned int pci_scan_child_bus(struct pci_bus *bus); 666 int __must_check pci_bus_add_device(struct pci_dev *dev); 667 void pci_read_bridge_bases(struct pci_bus *child); 668 struct resource *pci_find_parent_resource(const struct pci_dev *dev, 669 struct resource *res); 670 u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin); 671 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); 672 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); 673 extern struct pci_dev *pci_dev_get(struct pci_dev *dev); 674 extern void pci_dev_put(struct pci_dev *dev); 675 extern void pci_remove_bus(struct pci_bus *b); 676 extern void pci_remove_bus_device(struct pci_dev *dev); 677 extern void pci_stop_bus_device(struct pci_dev *dev); 678 void pci_setup_cardbus(struct pci_bus *bus); 679 extern void pci_sort_breadthfirst(void); 680 #define dev_is_pci(d) ((d)->bus == &pci_bus_type) 681 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false)) 682 #define dev_num_vf(d) ((dev_is_pci(d) ?
pci_num_vf(to_pci_dev(d)) : 0)) 683 684 /* Generic PCI functions exported to card drivers */ 685 686 enum pci_lost_interrupt_reason { 687 PCI_LOST_IRQ_NO_INFORMATION = 0, 688 PCI_LOST_IRQ_DISABLE_MSI, 689 PCI_LOST_IRQ_DISABLE_MSIX, 690 PCI_LOST_IRQ_DISABLE_ACPI, 691 }; 692 enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev); 693 int pci_find_capability(struct pci_dev *dev, int cap); 694 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap); 695 int pci_find_ext_capability(struct pci_dev *dev, int cap); 696 int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn, 697 int cap); 698 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap); 699 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap); 700 struct pci_bus *pci_find_next_bus(const struct pci_bus *from); 701 702 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, 703 struct pci_dev *from); 704 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, 705 unsigned int ss_vendor, unsigned int ss_device, 706 struct pci_dev *from); 707 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); 708 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, 709 unsigned int devfn); 710 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, 711 unsigned int devfn) 712 { 713 return pci_get_domain_bus_and_slot(0, bus, devfn); 714 } 715 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); 716 int pci_dev_present(const struct pci_device_id *ids); 717 718 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn, 719 int where, u8 *val); 720 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn, 721 int where, u16 *val); 722 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn, 723 int where, u32 *val); 724 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn, 725 int where, u8 val); 726 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, 727 int where, u16 val); 728 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn, 729 int where, u32 val); 730 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops); 731 732 static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val) 733 { 734 return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); 735 } 736 static inline int pci_read_config_word(struct pci_dev *dev, int where, u16 *val) 737 { 738 return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); 739 } 740 static inline int pci_read_config_dword(struct pci_dev *dev, int where, 741 u32 *val) 742 { 743 return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); 744 } 745 static inline int pci_write_config_byte(struct pci_dev *dev, int where, u8 val) 746 { 747 return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val); 748 } 749 static inline int pci_write_config_word(struct pci_dev *dev, int where, u16 val) 750 { 751 return pci_bus_write_config_word(dev->bus, dev->devfn, where, val); 752 } 753 static inline int pci_write_config_dword(struct pci_dev *dev, int where, 754 u32 val) 755 { 756 return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); 757 } 758 759 int __must_check pci_enable_device(struct pci_dev *dev); 760 int __must_check pci_enable_device_io(struct pci_dev *dev); 761 int __must_check pci_enable_device_mem(struct pci_dev *dev); 762 int __must_check pci_reenable_device(struct pci_dev *); 763 int 
__must_check pcim_enable_device(struct pci_dev *pdev); 764 void pcim_pin_device(struct pci_dev *pdev); 765 766 static inline int pci_is_enabled(struct pci_dev *pdev) 767 { 768 return (atomic_read(&pdev->enable_cnt) > 0); 769 } 770 771 static inline int pci_is_managed(struct pci_dev *pdev) 772 { 773 return pdev->is_managed; 774 } 775 776 void pci_disable_device(struct pci_dev *dev); 777 void pci_set_master(struct pci_dev *dev); 778 void pci_clear_master(struct pci_dev *dev); 779 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); 780 int pci_set_cacheline_size(struct pci_dev *dev); 781 #define HAVE_PCI_SET_MWI 782 int __must_check pci_set_mwi(struct pci_dev *dev); 783 int pci_try_set_mwi(struct pci_dev *dev); 784 void pci_clear_mwi(struct pci_dev *dev); 785 void pci_intx(struct pci_dev *dev, int enable); 786 void pci_msi_off(struct pci_dev *dev); 787 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size); 788 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask); 789 int pcix_get_max_mmrbc(struct pci_dev *dev); 790 int pcix_get_mmrbc(struct pci_dev *dev); 791 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc); 792 int pcie_get_readrq(struct pci_dev *dev); 793 int pcie_set_readrq(struct pci_dev *dev, int rq); 794 int __pci_reset_function(struct pci_dev *dev); 795 int pci_reset_function(struct pci_dev *dev); 796 void pci_update_resource(struct pci_dev *dev, int resno); 797 int __must_check pci_assign_resource(struct pci_dev *dev, int i); 798 int pci_select_bars(struct pci_dev *dev, unsigned long flags); 799 800 /* ROM control related routines */ 801 int pci_enable_rom(struct pci_dev *pdev); 802 void pci_disable_rom(struct pci_dev *pdev); 803 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); 804 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); 805 size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size); 806 807 /* Power management related routines */ 808 int pci_save_state(struct pci_dev *dev); 809 int pci_restore_state(struct pci_dev *dev); 810 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state); 811 int pci_set_power_state(struct pci_dev *dev, pci_power_t state); 812 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); 813 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); 814 void pci_pme_active(struct pci_dev *dev, bool enable); 815 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, 816 bool runtime, bool enable); 817 int pci_wake_from_d3(struct pci_dev *dev, bool enable); 818 pci_power_t pci_target_state(struct pci_dev *dev); 819 int pci_prepare_to_sleep(struct pci_dev *dev); 820 int pci_back_from_sleep(struct pci_dev *dev); 821 bool pci_dev_run_wake(struct pci_dev *dev); 822 bool pci_check_pme_status(struct pci_dev *dev); 823 void pci_wakeup_event(struct pci_dev *dev); 824 void pci_pme_wakeup_bus(struct pci_bus *bus); 825 826 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, 827 bool enable) 828 { 829 return __pci_enable_wake(dev, state, false, enable); 830 } 831 832 /* For use by arch with custom probe code */ 833 void set_pcie_port_type(struct pci_dev *pdev); 834 void set_pcie_hotplug_bridge(struct pci_dev *pdev); 835 836 /* Functions for PCI Hotplug drivers to use */ 837 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); 838 #ifdef CONFIG_HOTPLUG 839 unsigned int pci_rescan_bus(struct pci_bus *bus); 840 #endif 841 842 /* Vital product data routines */ 843 
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 844 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 845 int pci_vpd_truncate(struct pci_dev *dev, size_t size); 846 847 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ 848 void pci_bus_assign_resources(const struct pci_bus *bus); 849 void pci_bus_size_bridges(struct pci_bus *bus); 850 int pci_claim_resource(struct pci_dev *, int); 851 void pci_assign_unassigned_resources(void); 852 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge); 853 void pdev_enable_device(struct pci_dev *); 854 void pdev_sort_resources(struct pci_dev *, struct resource_list *); 855 int pci_enable_resources(struct pci_dev *, int mask); 856 void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *), 857 int (*)(struct pci_dev *, u8, u8)); 858 #define HAVE_PCI_REQ_REGIONS 2 859 int __must_check pci_request_regions(struct pci_dev *, const char *); 860 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *); 861 void pci_release_regions(struct pci_dev *); 862 int __must_check pci_request_region(struct pci_dev *, int, const char *); 863 int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *); 864 void pci_release_region(struct pci_dev *, int); 865 int pci_request_selected_regions(struct pci_dev *, int, const char *); 866 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *); 867 void pci_release_selected_regions(struct pci_dev *, int); 868 869 /* drivers/pci/bus.c */ 870 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, unsigned int flags); 871 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n); 872 void pci_bus_remove_resources(struct pci_bus *bus); 873 874 #define pci_bus_for_each_resource(bus, res, i) \ 875 for (i = 0; \ 876 (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \ 877 i++) 878 879 int __must_check pci_bus_alloc_resource(struct pci_bus *bus, 880 struct resource *res, resource_size_t size, 881 resource_size_t align, resource_size_t min, 882 unsigned int type_mask, 883 resource_size_t (*alignf)(void *, 884 const struct resource *, 885 resource_size_t, 886 resource_size_t), 887 void *alignf_data); 888 void pci_enable_bridges(struct pci_bus *bus); 889 890 /* Proper probing supporting hot-pluggable devices */ 891 int __must_check __pci_register_driver(struct pci_driver *, struct module *, 892 const char *mod_name); 893 894 /* 895 * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded 896 */ 897 #define pci_register_driver(driver) \ 898 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) 899 900 void pci_unregister_driver(struct pci_driver *dev); 901 void pci_remove_behind_bridge(struct pci_dev *dev); 902 struct pci_driver *pci_dev_driver(const struct pci_dev *dev); 903 int pci_add_dynid(struct pci_driver *drv, 904 unsigned int vendor, unsigned int device, 905 unsigned int subvendor, unsigned int subdevice, 906 unsigned int class, unsigned int class_mask, 907 unsigned long driver_data); 908 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, 909 struct pci_dev *dev); 910 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, 911 int pass); 912 913 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), 914 void *userdata); 915 int pci_cfg_space_size_ext(struct pci_dev *dev); 916 int pci_cfg_space_size(struct pci_dev *dev); 917 unsigned char 
pci_bus_max_busnr(struct pci_bus *bus); 918 919 int pci_set_vga_state(struct pci_dev *pdev, bool decode, 920 unsigned int command_bits, bool change_bridge); 921 /* kmem_cache style wrapper around pci_alloc_consistent() */ 922 923 #include <linux/pci-dma.h> 924 #include <linux/dmapool.h> 925 926 #define pci_pool dma_pool 927 #define pci_pool_create(name, pdev, size, align, allocation) \ 928 dma_pool_create(name, &pdev->dev, size, align, allocation) 929 #define pci_pool_destroy(pool) dma_pool_destroy(pool) 930 #define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) 931 #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) 932 933 enum pci_dma_burst_strategy { 934 PCI_DMA_BURST_INFINITY, /* make bursts as large as possible, 935 strategy_parameter is N/A */ 936 PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter 937 byte boundaries */ 938 PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of 939 strategy_parameter byte boundaries */ 940 }; 941 942 struct msix_entry { 943 u32 vector; /* kernel uses to write allocated vector */ 944 u16 entry; /* driver uses to specify entry, OS writes */ 945 }; 946 947 948 #ifndef CONFIG_PCI_MSI 949 static inline int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec) 950 { 951 return -1; 952 } 953 954 static inline void pci_msi_shutdown(struct pci_dev *dev) 955 { } 956 static inline void pci_disable_msi(struct pci_dev *dev) 957 { } 958 959 static inline int pci_msix_table_size(struct pci_dev *dev) 960 { 961 return 0; 962 } 963 static inline int pci_enable_msix(struct pci_dev *dev, 964 struct msix_entry *entries, int nvec) 965 { 966 return -1; 967 } 968 969 static inline void pci_msix_shutdown(struct pci_dev *dev) 970 { } 971 static inline void pci_disable_msix(struct pci_dev *dev) 972 { } 973 974 static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) 975 { } 976 977 static inline void pci_restore_msi_state(struct pci_dev *dev) 978 { } 979 static inline int pci_msi_enabled(void) 980 { 981 return 0; 982 } 983 #else 984 extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec); 985 extern void pci_msi_shutdown(struct pci_dev *dev); 986 extern void pci_disable_msi(struct pci_dev *dev); 987 extern int pci_msix_table_size(struct pci_dev *dev); 988 extern int pci_enable_msix(struct pci_dev *dev, 989 struct msix_entry *entries, int nvec); 990 extern void pci_msix_shutdown(struct pci_dev *dev); 991 extern void pci_disable_msix(struct pci_dev *dev); 992 extern void msi_remove_pci_irq_vectors(struct pci_dev *dev); 993 extern void pci_restore_msi_state(struct pci_dev *dev); 994 extern int pci_msi_enabled(void); 995 #endif 996 997 #ifndef CONFIG_PCIEASPM 998 static inline int pcie_aspm_enabled(void) 999 { 1000 return 0; 1001 } 1002 #else 1003 extern int pcie_aspm_enabled(void); 1004 #endif 1005 1006 #ifndef CONFIG_PCIE_ECRC 1007 static inline void pcie_set_ecrc_checking(struct pci_dev *dev) 1008 { 1009 return; 1010 } 1011 static inline void pcie_ecrc_get_policy(char *str) {}; 1012 #else 1013 extern void pcie_set_ecrc_checking(struct pci_dev *dev); 1014 extern void pcie_ecrc_get_policy(char *str); 1015 #endif 1016 1017 #define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1) 1018 1019 #ifdef CONFIG_HT_IRQ 1020 /* The functions a driver should call */ 1021 int ht_create_irq(struct pci_dev *dev, int idx); 1022 void ht_destroy_irq(unsigned int irq); 1023 #endif /* CONFIG_HT_IRQ */ 1024 1025 extern void pci_block_user_cfg_access(struct pci_dev *dev); 1026 extern void 
pci_unblock_user_cfg_access(struct pci_dev *dev); 1027 1028 /* 1029 * PCI domain support. Sometimes called PCI segment (eg by ACPI), 1030 * a PCI domain is defined to be a set of PCI busses which share 1031 * configuration space. 1032 */ 1033 #ifdef CONFIG_PCI_DOMAINS 1034 extern int pci_domains_supported; 1035 #else 1036 enum { pci_domains_supported = 0 }; 1037 static inline int pci_domain_nr(struct pci_bus *bus) 1038 { 1039 return 0; 1040 } 1041 1042 static inline int pci_proc_domain(struct pci_bus *bus) 1043 { 1044 return 0; 1045 } 1046 #endif /* CONFIG_PCI_DOMAINS */ 1047 1048 /* some architectures require additional setup to direct VGA traffic */ 1049 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, 1050 unsigned int command_bits, bool change_bridge); 1051 extern void pci_register_set_vga_state(arch_set_vga_state_t func); 1052 1053 #else /* CONFIG_PCI is not enabled */ 1054 1055 /* 1056 * If the system does not have PCI, clearly these return errors. Define 1057 * these as simple inline functions to avoid hair in drivers. 1058 */ 1059 1060 #define _PCI_NOP(o, s, t) \ 1061 static inline int pci_##o##_config_##s(struct pci_dev *dev, \ 1062 int where, t val) \ 1063 { return PCIBIOS_FUNC_NOT_SUPPORTED; } 1064 1065 #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \ 1066 _PCI_NOP(o, word, u16 x) \ 1067 _PCI_NOP(o, dword, u32 x) 1068 _PCI_NOP_ALL(read, *) 1069 _PCI_NOP_ALL(write,) 1070 1071 static inline struct pci_dev *pci_get_device(unsigned int vendor, 1072 unsigned int device, 1073 struct pci_dev *from) 1074 { 1075 return NULL; 1076 } 1077 1078 static inline struct pci_dev *pci_get_subsys(unsigned int vendor, 1079 unsigned int device, 1080 unsigned int ss_vendor, 1081 unsigned int ss_device, 1082 struct pci_dev *from) 1083 { 1084 return NULL; 1085 } 1086 1087 static inline struct pci_dev *pci_get_class(unsigned int class, 1088 struct pci_dev *from) 1089 { 1090 return NULL; 1091 } 1092 1093 #define pci_dev_present(ids) (0) 1094 #define no_pci_devices() (1) 1095 #define pci_dev_put(dev) do { } while (0) 1096 1097 static inline void pci_set_master(struct pci_dev *dev) 1098 { } 1099 1100 static inline int pci_enable_device(struct pci_dev *dev) 1101 { 1102 return -EIO; 1103 } 1104 1105 static inline void pci_disable_device(struct pci_dev *dev) 1106 { } 1107 1108 static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) 1109 { 1110 return -EIO; 1111 } 1112 1113 static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) 1114 { 1115 return -EIO; 1116 } 1117 1118 static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, 1119 unsigned int size) 1120 { 1121 return -EIO; 1122 } 1123 1124 static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, 1125 unsigned long mask) 1126 { 1127 return -EIO; 1128 } 1129 1130 static inline int pci_assign_resource(struct pci_dev *dev, int i) 1131 { 1132 return -EBUSY; 1133 } 1134 1135 static inline int __pci_register_driver(struct pci_driver *drv, 1136 struct module *owner) 1137 { 1138 return 0; 1139 } 1140 1141 static inline int pci_register_driver(struct pci_driver *drv) 1142 { 1143 return 0; 1144 } 1145 1146 static inline void pci_unregister_driver(struct pci_driver *drv) 1147 { } 1148 1149 static inline int pci_find_capability(struct pci_dev *dev, int cap) 1150 { 1151 return 0; 1152 } 1153 1154 static inline int pci_find_next_capability(struct pci_dev *dev, u8 post, 1155 int cap) 1156 { 1157 return 0; 1158 } 1159 1160 static inline int pci_find_ext_capability(struct pci_dev *dev, int cap) 1161 { 1162 
return 0; 1163 } 1164 1165 /* Power management related routines */ 1166 static inline int pci_save_state(struct pci_dev *dev) 1167 { 1168 return 0; 1169 } 1170 1171 static inline int pci_restore_state(struct pci_dev *dev) 1172 { 1173 return 0; 1174 } 1175 1176 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) 1177 { 1178 return 0; 1179 } 1180 1181 static inline pci_power_t pci_choose_state(struct pci_dev *dev, 1182 pm_message_t state) 1183 { 1184 return PCI_D0; 1185 } 1186 1187 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, 1188 int enable) 1189 { 1190 return 0; 1191 } 1192 1193 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) 1194 { 1195 return -EIO; 1196 } 1197 1198 static inline void pci_release_regions(struct pci_dev *dev) 1199 { } 1200 1201 #define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0) 1202 1203 static inline void pci_block_user_cfg_access(struct pci_dev *dev) 1204 { } 1205 1206 static inline void pci_unblock_user_cfg_access(struct pci_dev *dev) 1207 { } 1208 1209 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) 1210 { return NULL; } 1211 1212 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, 1213 unsigned int devfn) 1214 { return NULL; } 1215 1216 static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, 1217 unsigned int devfn) 1218 { return NULL; } 1219 1220 static inline int pci_domain_nr(struct pci_bus *bus) 1221 { return 0; } 1222 1223 #define dev_is_pci(d) (false) 1224 #define dev_is_pf(d) (false) 1225 #define dev_num_vf(d) (0) 1226 #endif /* CONFIG_PCI */ 1227 1228 /* Include architecture-dependent settings and functions */ 1229 1230 #include <asm/pci.h> 1231 1232 #ifndef PCIBIOS_MAX_MEM_32 1233 #define PCIBIOS_MAX_MEM_32 (-1) 1234 #endif 1235 1236 /* these helpers provide future and backwards compatibility 1237 * for accessing popular PCI BAR info */ 1238 #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) 1239 #define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) 1240 #define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) 1241 #define pci_resource_len(dev,bar) \ 1242 ((pci_resource_start((dev), (bar)) == 0 && \ 1243 pci_resource_end((dev), (bar)) == \ 1244 pci_resource_start((dev), (bar))) ? 0 : \ 1245 \ 1246 (pci_resource_end((dev), (bar)) - \ 1247 pci_resource_start((dev), (bar)) + 1)) 1248 1249 /* Similar to the helpers above, these manipulate per-pci_dev 1250 * driver-specific data. They are really just a wrapper around 1251 * the generic device structure functions of these calls. 1252 */ 1253 static inline void *pci_get_drvdata(struct pci_dev *pdev) 1254 { 1255 return dev_get_drvdata(&pdev->dev); 1256 } 1257 1258 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) 1259 { 1260 dev_set_drvdata(&pdev->dev, data); 1261 } 1262 1263 /* If you want to know what to call your pci_dev, ask this function. 1264 * Again, it's a wrapper around the generic device. 
1265 */ 1266 static inline const char *pci_name(const struct pci_dev *pdev) 1267 { 1268 return dev_name(&pdev->dev); 1269 } 1270 1271 1272 /* Some archs don't want to expose struct resource to userland as-is 1273 * in sysfs and /proc 1274 */ 1275 #ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER 1276 static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, 1277 const struct resource *rsrc, resource_size_t *start, 1278 resource_size_t *end) 1279 { 1280 *start = rsrc->start; 1281 *end = rsrc->end; 1282 } 1283 #endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */ 1284 1285 1286 /* 1287 * The world is not perfect and supplies us with broken PCI devices. 1288 * For at least a part of these bugs we need a work-around, so both 1289 * generic (drivers/pci/quirks.c) and per-architecture code can define 1290 * fixup hooks to be called for particular buggy devices. 1291 */ 1292 1293 struct pci_fixup { 1294 u16 vendor, device; /* You can use PCI_ANY_ID here of course */ 1295 void (*hook)(struct pci_dev *dev); 1296 }; 1297 1298 enum pci_fixup_pass { 1299 pci_fixup_early, /* Before probing BARs */ 1300 pci_fixup_header, /* After reading configuration header */ 1301 pci_fixup_final, /* Final phase of device fixups */ 1302 pci_fixup_enable, /* pci_enable_device() time */ 1303 pci_fixup_resume, /* pci_device_resume() */ 1304 pci_fixup_suspend, /* pci_device_suspend */ 1305 pci_fixup_resume_early, /* pci_device_resume_early() */ 1306 }; 1307 1308 /* Anonymous variables would be nice... */ 1309 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, hook) \ 1310 static const struct pci_fixup __pci_fixup_##name __used \ 1311 __attribute__((__section__(#section))) = { vendor, device, hook }; 1312 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ 1313 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 1314 vendor##device##hook, vendor, device, hook) 1315 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ 1316 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 1317 vendor##device##hook, vendor, device, hook) 1318 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ 1319 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 1320 vendor##device##hook, vendor, device, hook) 1321 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ 1322 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 1323 vendor##device##hook, vendor, device, hook) 1324 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ 1325 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1326 resume##vendor##device##hook, vendor, device, hook) 1327 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ 1328 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1329 resume_early##vendor##device##hook, vendor, device, hook) 1330 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ 1331 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1332 suspend##vendor##device##hook, vendor, device, hook) 1333 1334 #ifdef CONFIG_PCI_QUIRKS 1335 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); 1336 #else 1337 static inline void pci_fixup_device(enum pci_fixup_pass pass, 1338 struct pci_dev *dev) {} 1339 #endif 1340 1341 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); 1342 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr); 1343 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev); 1344 int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name); 1345 int pcim_iomap_regions_request_all(struct pci_dev *pdev, u16 mask, 1346 const char *name); 1347 void pcim_iounmap_regions(struct 
pci_dev *pdev, u16 mask); 1348 1349 extern int pci_pci_problems; 1350 #define PCIPCI_FAIL 1 /* No PCI PCI DMA */ 1351 #define PCIPCI_TRITON 2 1352 #define PCIPCI_NATOMA 4 1353 #define PCIPCI_VIAETBF 8 1354 #define PCIPCI_VSFX 16 1355 #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */ 1356 #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */ 1357 1358 extern unsigned long pci_cardbus_io_size; 1359 extern unsigned long pci_cardbus_mem_size; 1360 extern u8 __devinitdata pci_dfl_cache_line_size; 1361 extern u8 pci_cache_line_size; 1362 1363 extern unsigned long pci_hotplug_io_size; 1364 extern unsigned long pci_hotplug_mem_size; 1365 1366 int pcibios_add_platform_entries(struct pci_dev *dev); 1367 void pcibios_disable_device(struct pci_dev *dev); 1368 int pcibios_set_pcie_reset_state(struct pci_dev *dev, 1369 enum pcie_reset_state state); 1370 1371 #ifdef CONFIG_PCI_MMCONFIG 1372 extern void __init pci_mmcfg_early_init(void); 1373 extern void __init pci_mmcfg_late_init(void); 1374 #else 1375 static inline void pci_mmcfg_early_init(void) { } 1376 static inline void pci_mmcfg_late_init(void) { } 1377 #endif 1378 1379 int pci_ext_cfg_avail(struct pci_dev *dev); 1380 1381 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); 1382 1383 #ifdef CONFIG_PCI_IOV 1384 extern int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); 1385 extern void pci_disable_sriov(struct pci_dev *dev); 1386 extern irqreturn_t pci_sriov_migration(struct pci_dev *dev); 1387 extern int pci_num_vf(struct pci_dev *dev); 1388 #else 1389 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) 1390 { 1391 return -ENODEV; 1392 } 1393 static inline void pci_disable_sriov(struct pci_dev *dev) 1394 { 1395 } 1396 static inline irqreturn_t pci_sriov_migration(struct pci_dev *dev) 1397 { 1398 return IRQ_NONE; 1399 } 1400 static inline int pci_num_vf(struct pci_dev *dev) 1401 { 1402 return 0; 1403 } 1404 #endif 1405 1406 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) 1407 extern void pci_hp_create_module_link(struct pci_slot *pci_slot); 1408 extern void pci_hp_remove_module_link(struct pci_slot *pci_slot); 1409 #endif 1410 1411 /** 1412 * pci_pcie_cap - get the saved PCIe capability offset 1413 * @dev: PCI device 1414 * 1415 * PCIe capability offset is calculated at PCI device initialization 1416 * time and saved in the data structure. This function returns saved 1417 * PCIe capability offset. Using this instead of pci_find_capability() 1418 * reduces unnecessary search in the PCI configuration space. If you 1419 * need to calculate PCIe capability offset from raw device for some 1420 * reasons, please use pci_find_capability() instead. 1421 */ 1422 static inline int pci_pcie_cap(struct pci_dev *dev) 1423 { 1424 return dev->pcie_cap; 1425 } 1426 1427 /** 1428 * pci_is_pcie - check if the PCI device is PCI Express capable 1429 * @dev: PCI device 1430 * 1431 * Retrun true if the PCI device is PCI Express capable, false otherwise. 
1432 */ 1433 static inline bool pci_is_pcie(struct pci_dev *dev) 1434 { 1435 return !!pci_pcie_cap(dev); 1436 } 1437 1438 void pci_request_acs(void); 1439 1440 1441 #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ 1442 #define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT) 1443 1444 /* Large Resource Data Type Tag Item Names */ 1445 #define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */ 1446 #define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */ 1447 #define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */ 1448 1449 #define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING) 1450 #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA) 1451 #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA) 1452 1453 /* Small Resource Data Type Tag Item Names */ 1454 #define PCI_VPD_STIN_END 0x78 /* End */ 1455 1456 #define PCI_VPD_SRDT_END PCI_VPD_STIN_END 1457 1458 #define PCI_VPD_SRDT_TIN_MASK 0x78 1459 #define PCI_VPD_SRDT_LEN_MASK 0x07 1460 1461 #define PCI_VPD_LRDT_TAG_SIZE 3 1462 #define PCI_VPD_SRDT_TAG_SIZE 1 1463 1464 #define PCI_VPD_INFO_FLD_HDR_SIZE 3 1465 1466 #define PCI_VPD_RO_KEYWORD_PARTNO "PN" 1467 #define PCI_VPD_RO_KEYWORD_MFR_ID "MN" 1468 #define PCI_VPD_RO_KEYWORD_VENDOR0 "V0" 1469 1470 /** 1471 * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length 1472 * @lrdt: Pointer to the beginning of the Large Resource Data Type tag 1473 * 1474 * Returns the extracted Large Resource Data Type length. 1475 */ 1476 static inline u16 pci_vpd_lrdt_size(const u8 *lrdt) 1477 { 1478 return (u16)lrdt[1] + ((u16)lrdt[2] << 8); 1479 } 1480 1481 /** 1482 * pci_vpd_srdt_size - Extracts the Small Resource Data Type length 1483 * @lrdt: Pointer to the beginning of the Small Resource Data Type tag 1484 * 1485 * Returns the extracted Small Resource Data Type length. 1486 */ 1487 static inline u8 pci_vpd_srdt_size(const u8 *srdt) 1488 { 1489 return (*srdt) & PCI_VPD_SRDT_LEN_MASK; 1490 } 1491 1492 /** 1493 * pci_vpd_info_field_size - Extracts the information field length 1494 * @lrdt: Pointer to the beginning of an information field header 1495 * 1496 * Returns the extracted information field length. 1497 */ 1498 static inline u8 pci_vpd_info_field_size(const u8 *info_field) 1499 { 1500 return info_field[2]; 1501 } 1502 1503 /** 1504 * pci_vpd_find_tag - Locates the Resource Data Type tag provided 1505 * @buf: Pointer to buffered vpd data 1506 * @off: The offset into the buffer at which to begin the search 1507 * @len: The length of the vpd buffer 1508 * @rdt: The Resource Data Type to search for 1509 * 1510 * Returns the index where the Resource Data Type was found or 1511 * -ENOENT otherwise. 1512 */ 1513 int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt); 1514 1515 /** 1516 * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD 1517 * @buf: Pointer to buffered vpd data 1518 * @off: The offset into the buffer at which to begin the search 1519 * @len: The length of the buffer area, relative to off, in which to search 1520 * @kw: The keyword to search for 1521 * 1522 * Returns the index where the information field keyword was found or 1523 * -ENOENT otherwise. 1524 */ 1525 int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, 1526 unsigned int len, const char *kw); 1527 1528 #endif /* __KERNEL__ */ 1529 #endif /* LINUX_PCI_H */
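Among the declarations in that header are the inline configuration-space accessors (pci_read_config_word() and friends), which simply forward to the bus-level routines on dev->bus and dev->devfn. As a minimal sketch of how a driver uses them (the functions and register offsets are standard kernel API, but the surrounding code is illustrative, not taken from xordev):

#include <linux/pci.h>

static int example_check_ids(struct pci_dev *pdev)
{
        u16 vendor, device;
        int err;

        /* pci_read_config_word() wraps pci_bus_read_config_word()
         * for this device's bus and devfn, as defined in the header. */
        err = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
        if (err)
                return err;
        err = pci_read_config_word(pdev, PCI_DEVICE_ID, &device);
        if (err)
                return err;

        dev_info(&pdev->dev, "vendor 0x%04x device 0x%04x\n", vendor, device);
        return 0;
}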

Here is the explanation of the rule violation detected in your driver for the corresponding kernel.

Note that the reported violation may be a false positive. Please examine the error trace and the source code to decide whether your driver really contains an error.
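For orientation: roughly speaking, the Spinlocks lock/unlock rule reports paths on which a spinlock is acquired twice without an intervening release, released when it is not held, or still held when the verified code finishes. A minimal sketch of such a path (hypothetical code, not taken from your driver):

#include <linux/spinlock.h>
#include <linux/errno.h>

static DEFINE_SPINLOCK(example_lock);

static int example_handler(int fail)
{
        spin_lock(&example_lock);
        if (fail)
                return -EIO;    /* BUG: returns with example_lock still held */
        spin_unlock(&example_lock);
        return 0;
}

If every path shown in the trace actually releases the lock it acquires, the report is a false positive.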

The Error trace column contains the path along which the rule is violated. You can show or hide entire entity classes by clicking the corresponding checkboxes or via the advanced Others menu, and you can show or hide each particular entity by clicking the corresponding - or +. Hovering over an entity displays its description and meaning. The error trace is also bound to the source code: line numbers are shown as links on the left, and clicking one opens the corresponding line in the source code. Line numbers and file names also appear in entity descriptions.

The Source code column contains the contents of the files related to the error trace: your driver (note that it includes some of our modifications at the end), kernel headers, and the rule source code. Tabs show the currently opened file and the other available files; hovering over a tab reveals the full file name in its title, and clicking a tab displays that file's content.
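The modifications appended to the driver implement the environment model: a generated entry point that invokes the driver's handlers in a nondeterministic order, so the verifier can explore the possible call sequences, followed by a final check of the rule's model state. Schematically it looks like the sketch below; the names and structure are illustrative assumptions, not the exact generated code:

/* Returns an arbitrary value; the verifier explores every outcome. */
extern int __VERIFIER_nondet_int(void);

extern int example_probe(void);          /* the driver's own callbacks */
extern void example_read(void);
extern void example_remove(void);
extern void ldv_check_final_state(void); /* model: fail if a lock is still held */

void entry_point(void)
{
        if (example_probe())
                return;
        while (__VERIFIER_nondet_int()) {        /* arbitrary handler sequence */
                switch (__VERIFIER_nondet_int()) {
                case 0: example_read(); break;
                default: break;
                }
        }
        example_remove();
        ldv_check_final_state();
}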