Error trace
{
-__BLAST_initialize_/home/ldv/ldv-new/ldv-tools-inst/tmp/run/work/current--X--xordev.tar.gz_89--X--defaultlinux-3.0--X--39_7/linux-3.0/csd_deg_dscv/3/dscv_tempdir/dscv/rcv/39_7/main-ldv_main1_sequence_infinite_withcheck_stateful/preprocess/1-cpp/circ_dma_buf.c.common.i();
{
30 -CDB_SIZE = 4096;
ldv_lock_TEMPLATE = 1;
return 0; }
-__BLAST_initialize_/home/ldv/ldv-new/ldv-tools-inst/tmp/run/work/current--X--xordev.tar.gz_89--X--defaultlinux-3.0--X--39_7/linux-3.0/csd_deg_dscv/3/dscv_tempdir/dscv/rcv/39_7/main-ldv_main1_sequence_infinite_withcheck_stateful/preprocess/1-cpp/drv_xordev.i();
{
38 -__mod_author38[ 0 .. 23 ] = "author=Michal Marschall" /* 24 byte-by-byte ASCII stores, condensed */;
__mod_license39[ 0 .. 11 ] = "license=GPL" /* 12 byte-by-byte ASCII stores, condensed */;
TYPES = 3;
REG_INTR_EN = 0;
REG_SRC1 = 4;
REG_SRC2 = 8;
REG_DST = 12;
REG_COUNT = 16;
REG_INTR_COUNT = 20;
IRQ_OFF = 0;
IRQ_ON = 1;
xordev_fops.owner = &(__this_module);
xordev_fops.llseek = 0;
xordev_fops.read = 0;
xordev_fops.write = 0;
xordev_fops.aio_read = 0;
xordev_fops.aio_write = 0;
xordev_fops.readdir = 0;
xordev_fops.poll = 0;
xordev_fops.unlocked_ioctl = 0;
xordev_fops.compat_ioctl = 0;
xordev_fops.mmap = 0;
xordev_fops.open = &(xordev_open);
xordev_fops.flush = 0;
xordev_fops.release = &(xordev_release);
xordev_fops.fsync = 0;
xordev_fops.aio_fsync = 0;
xordev_fops.fasync = 0;
xordev_fops.lock = 0;
xordev_fops.sendpage = 0;
xordev_fops.get_unmapped_area = 0;
xordev_fops.check_flags = 0;
xordev_fops.flock = 0;
xordev_fops.splice_write = 0;
xordev_fops.splice_read = 0;
xordev_fops.setlease = 0;
xordev_fops.fallocate = 0;
xordev_dst_fops.owner = &(__this_module);
xordev_dst_fops.llseek = 0;
xordev_dst_fops.read = &(xordev_read);
xordev_dst_fops.write = 0;
xordev_dst_fops.aio_read = 0;
xordev_dst_fops.aio_write = 0;
xordev_dst_fops.readdir = 0;
xordev_dst_fops.poll = 0;
xordev_dst_fops.unlocked_ioctl = 0;
xordev_dst_fops.compat_ioctl = 0;
xordev_dst_fops.mmap = 0;
xordev_dst_fops.open = &(xordev_open);
xordev_dst_fops.flush = 0;
xordev_dst_fops.release = &(xordev_release);
xordev_dst_fops.fsync = 0;
xordev_dst_fops.aio_fsync = 0;
xordev_dst_fops.fasync = 0;
xordev_dst_fops.lock = 0;
xordev_dst_fops.sendpage = 0;
xordev_dst_fops.get_unmapped_area = 0;
xordev_dst_fops.check_flags = 0;
xordev_dst_fops.flock = 0;
xordev_dst_fops.splice_write = 0;
xordev_dst_fops.splice_read = 0;
xordev_dst_fops.setlease = 0;
xordev_dst_fops.fallocate = 0;
xordev_input_fops.owner = &(__this_module);
xordev_input_fops.llseek = 0;
xordev_input_fops.read = 0;
xordev_input_fops.write = &(xordev_write);
xordev_input_fops.aio_read = 0;
xordev_input_fops.aio_write = 0;
xordev_input_fops.readdir = 0;
xordev_input_fops.poll = 0;
xordev_input_fops.unlocked_ioctl = 0;
xordev_input_fops.compat_ioctl = 0;
xordev_input_fops.mmap = 0;
xordev_input_fops.open = &(xordev_open);
xordev_input_fops.flush = 0;
xordev_input_fops.release = &(xordev_release);
xordev_input_fops.fsync = 0;
xordev_input_fops.aio_fsync = 0;
xordev_input_fops.fasync = 0;
xordev_input_fops.lock = 0;
xordev_input_fops.sendpage = 0;
xordev_input_fops.get_unmapped_area = 0;
xordev_input_fops.check_flags = 0;
xordev_input_fops.flock = 0;
xordev_input_fops.splice_write = 0;
xordev_input_fops.splice_read = 0;
xordev_input_fops.setlease = 0;
xordev_input_fops.fallocate = 0;
xordev_id.vendor = 6900;
xordev_id.device = 4349;
xordev_id.subvendor = ~(0);
xordev_id.subdevice = ~(0);
xordev_id.class = 0;
xordev_id.class_mask = 0;
xordev_id.driver_data = 0;
xordev_driver.node.next = 0;
xordev_driver.node.prev = 0;
xordev_driver.name = "xordev_pci";
xordev_driver.id_table = &(xordev_id);
xordev_driver.probe = &(xordev_probe);
xordev_driver.remove = &(xordev_remove);
xordev_driver.suspend = 0;
xordev_driver.suspend_late = 0;
xordev_driver.resume_early = 0;
xordev_driver.resume = 0;
xordev_driver.shutdown = 0;
xordev_driver.err_handler = 0;
xordev_driver.driver.name = 0;
xordev_driver.driver.bus = 0;
xordev_driver.driver.owner = 0;
xordev_driver.driver.mod_name = 0;
xordev_driver.driver.suppress_bind_attrs = 0;
xordev_driver.driver.of_match_table = 0;
xordev_driver.driver.probe = 0;
xordev_driver.driver.remove = 0;
xordev_driver.driver.shutdown = 0;
xordev_driver.driver.suspend = 0;
xordev_driver.driver.resume = 0;
xordev_driver.driver.groups = 0;
xordev_driver.driver.pm = 0;
xordev_driver.driver.p = 0;
xordev_driver.dynids.lock.__annonCompField17.rlock.raw_lock.slock = 0;
xordev_driver.dynids.lock.__annonCompField17.rlock.magic = 0;
xordev_driver.dynids.lock.__annonCompField17.rlock.owner_cpu = 0;
xordev_driver.dynids.lock.__annonCompField17.rlock.owner = 0;
xordev_driver.dynids.list.next = 0;
xordev_driver.dynids.list.prev = 0;
return 0; }
559 LDV_IN_INTERRUPT = 1;
568 -ldv_initialize_FOREACH();
584 -tmp___7 = xordev_init_module();
{
459 -devices[ i ].present = 0;
devices[ i ].number = i;
i = i + 1;
/* ...the three statements above repeat verbatim for all 64 iterations of the loop at source line 458 (i = 0 .. 63); the remaining 63 copies are elided... */
464 -xordev_major = register_chrdev(0 /* major */, "xordev_char" /* name */, &(xordev_fops) /* fops */);
{
2100 tmp = __register_chrdev(major, 0, 256, name, fops) { /* The function body is undefined. */ };
}
465 assert( xordev_major >= 0);
469 tmp___7 = __class_create(&(__this_module), "xordev_class", &(__key___7)) { /* The function body is undefined. */ };
469 xordev_class = tmp___7;
470 -tmp___8 = IS_ERR(xordev_class /* ptr */);
{
34 -tmp = __builtin_expect(__cil_tmp3 /* val */, 0 /* res */);
}
470 assert( tmp___8 == 0);
474 ret_value = __pci_register_driver(&(xordev_driver), &(__this_module), "xordev") { /* The function body is undefined. */ };
475 assert( ret_value >= 0);
}
584 assert( tmp___7 == 0);
589 tmp___9 = nondet_int() { /* The function body is undefined. */ };
589 assert( tmp___9 != 0);
592 tmp___8 = nondet_int() { /* The function body is undefined. */ };
594 assert( tmp___8 == 0);
597 LDV_IN_INTERRUPT = 2;
612 -xordev_irq_handler(var_xordev_irq_handler_12_p0 /* irq */, var_xordev_irq_handler_12_p1 /* device */);
{
311 -tmp___7 = pci_get_drvdata(device /* pdev */);
{
1318 tmp = dev_get_drvdata(&(pdev)->dev) { /* The function body is undefined. */ };
}
315 -ldv_spin_lock_irqsave_lock(&(xordev)->lock /* lock */, irq_flags /* flags */);
{
203 assert( ldv_lock_lock == 1);
return 0; }
316 tmp___8 = ioread32(*(xordev).iomem + REG_INTR_EN) { /* The function body is undefined. */ };
316 assert( tmp___8 == IRQ_ON);
320 tmp___9 = ioread32(*(xordev).iomem + REG_COUNT) { /* The function body is undefined. */ };
321 tmp___10 = ioread32(*(xordev).iomem + REG_INTR_COUNT) { /* The function body is undefined. */ };
321 intr_count = tmp___10;
322 assert( count <= intr_count);
328 -update_buffer(*(xordev).src1 /* chardev */);
{
107 buffer = *(chardev).buffer;
111 -ldv_spin_lock_irqsave_lock(&(buffer)->lock /* lock */, irq_flags /* flags */);
{
203 assert( ldv_lock_lock != 1);
{ } } } } }
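
Editor's note on the failed assertion: xordev_irq_handler() first acquires xordev->lock (model line 203, lock state 1 -> 2, so "assert( ldv_lock_lock == 1)" passes) and then, still holding it, calls update_buffer(), which acquires buffer->lock through the same model function. The shared state variable is already 2, so the "lock should be in a free state" check fails; the final trace line apparently renders this as the negated condition that held at the error ("assert( ldv_lock_lock != 1)"). A minimal sketch of the reported call chain (editor's reconstruction from the trace, not verifier output):

    irqreturn_t xordev_irq_handler(int irq, void *device)
    {
            ldv_spin_lock_irqsave(&xordev->lock, irq_flags);        /* model state: 1 -> 2 */
            /* ... */
            update_buffer(xordev->src1);
                    /* inside update_buffer(), xordev->lock still held: */
                    ldv_spin_lock_irqsave(&buffer->lock, irq_flags); /* assert(state == 1) fails */
    }

Because the spinlock model below tracks one shared state for every spinlock_t, any nested acquisition, even of two distinct locks, is reported this way.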
Source code
1 #include <linux/spinlock.h>
2 void ldv_spin_lock_irqsave(spinlock_t *lock, unsigned long flags);
3 void ldv_spin_lock_nested(spinlock_t *lock, int subclass);
4 void ldv_spin_lock_nest_lock(spinlock_t *lock, void *map);
5 void ldv_spin_lock_irqsave_nested(spinlock_t *lock, int subclass);
6 int ldv_spin_trylock_irqsave(spinlock_t *lock, unsigned long flags);
7 void ldv_spin_lock(spinlock_t *lock);
8 void ldv_spin_lock_bh(spinlock_t *lock);
9 void ldv_spin_lock_irq(spinlock_t *lock);
10 int ldv_spin_trylock(spinlock_t *lock);
11 int ldv_spin_trylock_bh(spinlock_t *lock);
12 int ldv_spin_trylock_irq(spinlock_t *lock);
13 void ldv_spin_unlock(spinlock_t *lock);
14 void ldv_spin_unlock_bh(spinlock_t *lock);
15 void ldv_spin_unlock_irq(spinlock_t *lock);
16 void ldv_spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
17 void ldv_spin_unlock_wait(spinlock_t *lock);
18 int ldv_spin_is_locked(spinlock_t *lock);
19 int ldv_spin_is_contended(spinlock_t *lock);
20 int ldv_spin_can_lock(spinlock_t *lock);
21 int ldv_atomic_dec_and_lock(spinlock_t *lock, atomic_t *atomic);
22 #define ldv_atomic_dec_and_lock_macro(atomic,lock) ldv_atomic_dec_and_lock(lock,atomic)
23 // Michal Marschall
24 // Index number: 291693
25
26 #include "circ_dma_buf.h"
27 #include <linux/kernel.h>
28 #include <linux/pci.h>
29
30 const int CDB_SIZE = 4096;
31
32 struct circ_dma_buf *cdb_alloc( struct device *device) {
33 struct circ_dma_buf *buffer = kmalloc( sizeof( struct circ_dma_buf), GFP_KERNEL);
34 if(buffer == NULL)
35 return NULL;
36 buffer->data = dma_alloc_coherent(device, CDB_SIZE, &buffer->handle, GFP_KERNEL);
37 if(buffer->data == NULL) {
38 kfree(buffer); // avoid memory leak
39 return NULL;
40 }
41 buffer->start = 0;
42 buffer->fill = 0;
43 buffer->device = device;
44 spin_lock_init(&buffer->lock);
45 return buffer;
46 }
47
48 void cdb_free( struct circ_dma_buf *buffer) {
49 dma_free_coherent(buffer->device, CDB_SIZE, buffer->data, buffer->handle);
50 kfree(buffer);
51 }
52
53 int cdb_inc_begin( struct circ_dma_buf *buffer, int value) {
54 if(value > buffer->fill)
55 value = buffer->fill;
56 buffer->fill -= value;
57 buffer->start = (buffer->start + value) % CDB_SIZE;
58 return value;
59 }
60
61 int cdb_inc_end( struct circ_dma_buf *buffer, int value) {
62 if(value > CDB_SIZE - buffer->fill)
63 value = CDB_SIZE - buffer->fill;
64 buffer->fill += value;
65 return value;
66 }
67
68 int cdb_copy_from( struct circ_dma_buf *source, void *dest, int count, int (*copy_func)( void *, void *, int)) {
69 int first, copied1, copied2;
70 unsigned long irq_flags;
71
72 ldv_spin_lock_irqsave(&source->lock, irq_flags);
73 if(count > source->fill)
74 count = source->fill;
75
76 // copy first part:
77 if(count > CDB_SIZE - source->start)
78 first = CDB_SIZE - source->start;
79 else
80 first = count;
81 ldv_spin_unlock_irqrestore(&source->lock, irq_flags);
82 copied1 = copy_func(source->data + source->start, dest, first);
83 ldv_spin_lock_irqsave(&source->lock, irq_flags);
84 cdb_inc_begin(source, copied1);
85 ldv_spin_unlock_irqrestore(&source->lock, irq_flags);
86 if(first == count || copied1 < first) // copied everything or error
87 return copied1;
88
89 // copy second part:
90 copied2 = copy_func(source->data, dest + first, count - first);
91 cdb_inc_begin(source, copied2);
92
93 return copied1 + copied2;
94 }
95
96 int cdb_copy_to( void *source, struct circ_dma_buf *dest, int count, int (*copy_func)( void *, void *, int)) {
97 int end, first, copied1, copied2;
98 unsigned long irq_flags;
99
100 ldv_spin_lock_irqsave(&dest->lock, irq_flags);
101 if(count > CDB_SIZE - dest->fill)
102 count = CDB_SIZE - dest->fill;
103
104 // copy first part:
105 end = (dest->start + dest->fill) % CDB_SIZE;
106 if(end + count > CDB_SIZE)
107 first = CDB_SIZE - end;
108 else
109 first = count;
110 ldv_spin_unlock_irqrestore(&dest->lock, irq_flags);
111 copied1 = copy_func(source, dest->data + end, first);
112 ldv_spin_lock_irqsave(&dest->lock, irq_flags);
113 cdb_inc_end(dest, copied1);
114 ldv_spin_unlock_irqrestore(&dest->lock, irq_flags);
115 if(first == count || copied1 < first) // copied everything or error
116 return copied1;
117
118 // copy second part:
119 copied2 = copy_func(source + first, dest->data, count - first);
120 cdb_inc_end(dest, copied2);
121
122 return copied1 + copied2;
123 }
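
Editor's note: cdb_copy_from() and cdb_copy_to() split each transfer into at most two contiguous chunks because the valid region may wrap past the end of the buffer. A minimal standalone sketch of the same split arithmetic (hypothetical program, CDB_SIZE shrunk to 8 for readability; not part of the driver):

    #include <stdio.h>

    #define CDB_SIZE 8

    /* Mirrors the chunking in cdb_copy_from(): clamp the request to the
     * available data, take the contiguous run up to the buffer edge first,
     * and leave the remainder to start again at index 0. */
    static void split_read(int start, int fill, int count, int *first, int *second)
    {
            if (count > fill)
                    count = fill;
            if (count > CDB_SIZE - start)
                    *first = CDB_SIZE - start;
            else
                    *first = count;
            *second = count - *first;
    }

    int main(void)
    {
            int first, second;
            /* 5 valid bytes starting at index 6 occupy slots 6,7,0,1,2: */
            split_read(6, 5, 5, &first, &second);
            printf("first=%d second=%d\n", first, second); /* prints first=2 second=3 */
            return 0;
    }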
124 /* LDV_COMMENT_BEGIN_MODEL */
125 #include <linux/kernel.h>
126 #include <linux/spinlock.h>
127
128 /*
129 CONFIG_DEBUG_SPINLOCK should be true
130 make menuconfig
131 Kernel hacking->Kernel debugging->Spinlock and rw-lock debugging: basic checks
132 */
133
134 /* the function works only without aspectator */
135 long __builtin_expect( long val, long res) {
136 return val;
137 }
138
139 #include "engine-blast.h"
140
141
142 /* Need this because rerouter is buggy!.. */
143 extern int ldv_lock_TEMPLATE;
144 /* Now the actual variable goes... */
145 int ldv_lock_TEMPLATE = 1;
146
147 #define __ldv_spin_lock() \
148 do {\
149 /* LDV_COMMENT_ASSERT Lock should be in a free state*/\
150 ldv_assert(ldv_lock_TEMPLATE== 1);\
151 /* LDV_COMMENT_CHANGE_STATE Goto locked state*/\
152 ldv_lock_TEMPLATE= 2;\
153 } while( 0)
154
155 #define __ldv_spin_unlock() \
156 do {\
157 /* LDV_COMMENT_ASSERT Lock should be in a locked state*/\
158 ldv_assert(ldv_lock_TEMPLATE!= 1);\
159 /* LDV_COMMENT_CHANGE_STATE Goto free state*/\
160 ldv_lock_TEMPLATE= 1;\
161 } while( 0)
162
163 #define __ldv_spin_trylock() \
164 do {\
165 int is_lock_held_by_another_thread;\
166 /* LDV_COMMENT_OTHER Construct an arbitrary flag*/\
167 is_lock_held_by_another_thread = ldv_undef_int();\
168 /* LDV_COMMENT_OTHER If lock is free choose arbitrary action*/\
169 if(ldv_lock_TEMPLATE== 1 && is_lock_held_by_another_thread)\
170 {\
171 /* LDV_COMMENT_CHANGE_STATE Goto locked state*/\
172 ldv_lock_TEMPLATE= 2;\
173 /* LDV_COMMENT_RETURN The lock is acquired*/\
174 return 1;\
175 }\
176 else\
177 {\
178 /* LDV_COMMENT_RETURN The lock is not acquired*/\
179 return 0;\
180 }\
181 } while( 0)
182
183 #define __ldv_spin_checklocked(free,busy) \
184 do {\
185 int is_lock_held_by_another_thread;\
186 /* LDV_COMMENT_OTHER Construct an arbitrary flag*/\
187 is_lock_held_by_another_thread = ldv_undef_int();\
188 /* LDV_COMMENT_OTHER If lock is free choose arbitrary action*/\
189 if(ldv_lock_TEMPLATE== 1 && is_lock_held_by_another_thread)\
190 {\
191 /* LDV_COMMENT_RETURN The lock is free*/\
192 return free;\
193 }\
194 else\
195 {\
196 /* LDV_COMMENT_RETURN The lock is not free*/\
197 return busy;\
198 }\
199 } while( 0)
200
201 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_irqsave(?!_nested)') Acquires the lock and checks for double spin lock*/
202 void ldv_spin_lock_irqsave_TEMPLATE(spinlock_t *lock, unsigned long flags) {
203 __ldv_spin_lock();
204 }
205
206 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_nested') Acquires the lock and checks for double spin lock*/
207 void ldv_spin_lock_nested_TEMPLATE(spinlock_t *lock, int subclass) {
208 __ldv_spin_lock();
209 }
210
211 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_nest_lock') Acquires the lock and checks for double spin lock*/
212 void ldv_spin_lock_nest_lock_TEMPLATE(spinlock_t *lock, void *map) {
213 __ldv_spin_lock();
214 }
215
216 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_irqsave_nested') Acquires the lock and checks for double spin lock*/
217 void ldv_spin_lock_irqsave_nested_TEMPLATE(spinlock_t *lock, unsigned long flags, int subclass) {
218 __ldv_spin_lock();
219 }
220
221 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_irqsave(?!_nested)') Tries to acquire the lock and returns one if successful*/
222 int ldv_spin_trylock_irqsave_TEMPLATE(spinlock_t *lock, unsigned long flags) {
223 __ldv_spin_trylock();
224 }
225
226 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock(?!_bh|_irq|_irqsave|_nested|_irqsave_nested|_nest_lock)') Acquires the lock and checks for double spin lock*/
227 void ldv_spin_lock_TEMPLATE(spinlock_t *lock) {
228 __ldv_spin_lock();
229 }
230
231 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_bh') Acquires the lock and checks for double spin lock*/
232 void ldv_spin_lock_bh_TEMPLATE(spinlock_t *lock) {
233 __ldv_spin_lock();
234 }
235
236 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_lock_irq(?!save|save_nested)') Acquires the lock and checks for double spin lock*/
237 void ldv_spin_lock_irq_TEMPLATE(spinlock_t *lock) {
238 __ldv_spin_lock();
239 }
240
241 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock(?!_bh|_irq|_irqsave|_irqsave_nested)') Tries to acquire the lock and returns one if successful*/
242 int ldv_spin_trylock_TEMPLATE(spinlock_t *lock) {
243 __ldv_spin_trylock();
244 }
245
246 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_bh') Tries to acquire the lock and returns one if successful*/
247 int ldv_spin_trylock_bh_TEMPLATE(spinlock_t *lock) {
248 __ldv_spin_trylock();
249 }
250
251 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_trylock_irq(?!save|save_nested)') Tries to acquire the lock and returns one if successful*/
252 int ldv_spin_trylock_irq_TEMPLATE(spinlock_t *lock) {
253 __ldv_spin_trylock();
254 }
255
256 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock(?!_bh|_irq|_irqrestore)') Releases the lock and checks that lock was acquired before*/
257 void ldv_spin_unlock_TEMPLATE(spinlock_t *lock) {
258 __ldv_spin_unlock();
259 }
260
261 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_bh') Releases the lock and checks that lock was acquired before*/
262 void ldv_spin_unlock_bh_TEMPLATE(spinlock_t *lock) {
263 __ldv_spin_unlock();
264 }
265
266 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_irq(?!restore)') Releases the lock and checks that lock was acquired before*/
267 void ldv_spin_unlock_irq_TEMPLATE(spinlock_t *lock) {
268 __ldv_spin_unlock();
269 }
270
271 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_irqrestore') Releases the lock and checks that lock was acquired before*/
272 void ldv_spin_unlock_irqrestore_TEMPLATE(spinlock_t *lock, unsigned long flags) {
273 __ldv_spin_unlock();
274 }
275
276 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_unlock_wait') If some process is holding the lock, waits until it is released*/
277 void ldv_spin_unlock_wait_TEMPLATE(spinlock_t *lock) {
278 /* LDV_COMMENT_ASSERT The spinlock must not be locked by the current process*/
279 ldv_assert(ldv_lock_TEMPLATE== 1);
280 }
281
282 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_locked') Checks whether the lock is free or not*/
283 int ldv_spin_is_locked_TEMPLATE(spinlock_t *lock) {
284 __ldv_spin_checklocked( 0, 1);
285 }
286
287 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_is_contended')*/
288 int ldv_spin_is_contended_TEMPLATE(spinlock_t *lock) {
289 int is_lock_contended;
290 /* LDV_COMMENT_OTHER Construct an arbitrary flag*/
291 is_lock_contended = ldv_undef_int();
292 /* LDV_COMMENT_OTHER Choose arbitrary action*/
293 if(is_lock_contended)
294 {
295 /* LDV_COMMENT_RETURN The lock is contended*/
296 return 1;
297 }
298 else
299 {
300 /* LDV_COMMENT_RETURN The lock is not contended*/
301 return 0;
302 }
303 }
304
305 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_spin_can_lock') Checks whether the lock is free or not*/
306 int ldv_spin_can_lock_TEMPLATE(spinlock_t *lock) {
307 __ldv_spin_checklocked( 1, 0);
308 }
309
310 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_atomic_dec_and_lock') Locks on reaching reference count zero*/
311 int ldv_atomic_dec_and_lock_TEMPLATE(spinlock_t *lock, atomic_t *atomic) {
312 int is_atomic_counter_is_one;
313 /* LDV_COMMENT_OTHER Construct an arbitrary flag*/
314 is_atomic_counter_is_one = ldv_undef_int();
315 /* LDV_COMMENT_OTHER Choose arbitrary action*/
316 if(is_atomic_counter_is_one) {
317 /* LDV_COMMENT_RETURN Set counter to zero*/
318 atomic_dec(atomic);
319 /* LDV_COMMENT_RETURN Acquire the lock and return true*/
320 __ldv_spin_lock();
321 return 1;
322 } else {
323 /* LDV_COMMENT_RETURN Return false*/
324 return 0;
325 }
326 }
327
328 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that the spinlock is unlocked at the end*/
329 void ldv_check_final_state_TEMPLATE( void)
330 {
331 /* LDV_COMMENT_ASSERT The spinlock must be unlocked at the end*/
332 ldv_assert(ldv_lock_TEMPLATE == 1);
333 }
334
335 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_initialize') Initialize spinlock variables*/
336 void ldv_initialize_TEMPLATE( void)
337 {
338 /* LDV_COMMENT_ASSERT Initialize spinlock with initial model value*/
339 ldv_lock_TEMPLATE = 1;
340 }
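
Editor's note: the template above models every spinlock in the module with the single integer ldv_lock_TEMPLATE (1 = free, 2 = held). A standalone sketch (plain C with assert(), not the LDV harness) showing why any nested pair of spin-lock acquisitions, even on two distinct spinlock_t objects, violates this model:

    #include <assert.h>

    static int ldv_lock = 1;                     /* 1 = free, 2 = held */

    static void model_lock(void)   { assert(ldv_lock == 1); ldv_lock = 2; }
    static void model_unlock(void) { assert(ldv_lock != 1); ldv_lock = 1; }

    int main(void)
    {
            model_lock();    /* e.g. xordev->lock in the IRQ handler */
            model_lock();    /* e.g. buffer->lock in update_buffer(): assertion fires here */
            model_unlock();
            model_unlock();
            return 0;
    }

This is the over-approximation behind the trace above: the model cannot distinguish re-acquiring the same lock from taking a second, different lock while the first is held.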
1 #include <linux/spinlock.h>
2 void ldv_spin_lock_irqsave(spinlock_t *lock, unsigned long flags);
3 void ldv_spin_lock_nested(spinlock_t *lock, int subclass);
4 void ldv_spin_lock_nest_lock(spinlock_t *lock, void *map);
5 void ldv_spin_lock_irqsave_nested(spinlock_t *lock, int subclass);
6 int ldv_spin_trylock_irqsave(spinlock_t *lock, unsigned long flags);
7 void ldv_spin_lock(spinlock_t *lock);
8 void ldv_spin_lock_bh(spinlock_t *lock);
9 void ldv_spin_lock_irq(spinlock_t *lock);
10 int ldv_spin_trylock(spinlock_t *lock);
11 int ldv_spin_trylock_bh(spinlock_t *lock);
12 int ldv_spin_trylock_irq(spinlock_t *lock);
13 void ldv_spin_unlock(spinlock_t *lock);
14 void ldv_spin_unlock_bh(spinlock_t *lock);
15 void ldv_spin_unlock_irq(spinlock_t *lock);
16 void ldv_spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
17 void ldv_spin_unlock_wait(spinlock_t *lock);
18 int ldv_spin_is_locked(spinlock_t *lock);
19 int ldv_spin_is_contended(spinlock_t *lock);
20 int ldv_spin_can_lock(spinlock_t *lock);
21 int ldv_atomic_dec_and_lock(spinlock_t *lock, atomic_t *atomic);
22 #define ldv_atomic_dec_and_lock_macro(atomic,lock) ldv_atomic_dec_and_lock(lock,atomic)
23
24 // Michal Marschall
25 // Index number: 291693
26
27 #include <linux/module.h>
28 #include <linux/kernel.h>
29 #include <linux/pci.h>
30 #include <linux/interrupt.h>
31 #include <linux/fs.h>
32 #include <linux/errno.h>
33 #include <linux/spinlock.h>
34 #include <linux/wait.h>
35 #include <linux/sched.h>
36 #include "circ_dma_buf.h"
37
38 MODULE_AUTHOR( "Michal Marschall");
39 MODULE_LICENSE( "GPL");
40
41 #define MAX_DEVICES 64
42
43 // types of character devices:
44 enum chardev_type {TYPE_SRC1, TYPE_SRC2, TYPE_DST};
45 const int TYPES = 3; // total number of types
46
47 #define XORDEV_MINOR(number, type) ((number) * TYPES + (type))
48 #define GET_NUMBER(minor) ((minor) / TYPES)
49 #define GET_TYPE(minor) ((minor) % TYPES)
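
A quick worked example of this minor-number encoding (editor's note; the concrete values are illustrative):

    /* With TYPES == 3 and TYPE_DST == 2:
     *   XORDEV_MINOR(2, TYPE_DST) == 2 * 3 + 2 == 8
     *   GET_NUMBER(8) == 8 / 3 == 2 and GET_TYPE(8) == 8 % 3 == 2 (TYPE_DST)
     */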
50
51 // names used in various places:
52 #define NAME_CLASS "xordev_class"
53 #define NAME_PCI "xordev_pci"
54 #define NAME_CHAR "xordev_char"
55
56 // ids:
57 #define ID_VENDOR 0x1af4
58 #define ID_DEVICE 0x10fd
59
60 // xordev PCI registers:
61 static const int REG_INTR_EN = 0x00;
62 static const int REG_SRC1 = 0x04;
63 static const int REG_SRC2 = 0x08;
64 static const int REG_DST = 0x0c;
65 static const int REG_COUNT = 0x10;
66 static const int REG_INTR_COUNT = 0x14;
67
68 // values of REG_INTR_EN:
69 static const int IRQ_OFF = 0;
70 static const int IRQ_ON = 1;
71
72 struct xordev_device {
73 int present;
74 int number; // from 0 to MAX_DEVICES - 1
75 struct pci_dev *pcidev;
76 void __iomem *iomem;
77 int sent; // bytes sent to xor
78 wait_queue_head_t wait_queue;
79 spinlock_t lock;
80 struct char_device *src1;
81 struct char_device *src2;
82 struct char_device *dst;
83 };
84
85 struct char_device {
86 dev_t node;
87 struct xordev_device *xordev;
88 struct circ_dma_buf *buffer;
89 struct mutex mutex;
90 // for src* devices only:
91 int end;
92 };
93
94 static int xordev_major;
95 static struct class *xordev_class;
96 struct xordev_device devices[MAX_DEVICES];
97
98 static int cdb_copy_to_user( void *source, void *dest, int count) { // copy_func for cdb_copy_*: converts copy_to_user()'s "bytes not copied" into "bytes copied"
99 return count - copy_to_user(dest, source, count);
100 }
101
102 static int cdb_copy_from_user( void *source, void *dest, int count) { // copy_func for cdb_copy_*: converts copy_from_user()'s "bytes not copied" into "bytes copied"
103 return count - copy_from_user(dest, source, count);
104 }
105
106 static int update_buffer( struct char_device *chardev) {
107 struct circ_dma_buf *buffer = chardev->buffer;
108 unsigned long irq_flags;
109 int position, result;
110
111 ldv_spin_lock_irqsave(&buffer->lock, irq_flags);
112 position = chardev->xordev->sent - ioread32(chardev->xordev->iomem + REG_COUNT);
113 switch(GET_TYPE(chardev->node)) {
114 case TYPE_SRC1:
115 case TYPE_SRC2:
116 result = position - buffer->start;
117 if(result < 0)
118 result += CDB_SIZE;
119 if(cdb_inc_begin(buffer, result) != result) // this should never happen
120 result = -EIO;
121 break;
122 case TYPE_DST:
123 result = position - (buffer->start + buffer->fill);
124 if(result < 0)
125 result += CDB_SIZE;
126 if(cdb_inc_end(buffer, result) != result) // this should never happen
127 result = -EIO;
128 break;
129 default:
130 result = -EINVAL;
131 }
132 ldv_spin_unlock_irqrestore(&buffer->lock, irq_flags);
133
134 return result;
135 }
136
137 static void start_xoring( struct xordev_device *xordev) {
138 unsigned long irq_flags1, irq_flags2;
139 int ready, space, send_now;
140
141 ldv_spin_lock_irqsave(&xordev->lock, irq_flags1);
142 ldv_spin_lock_irqsave(&xordev->dst->buffer->lock, irq_flags2);
143 ready = min_t( int, xordev->src1->end, xordev->src2->end) - xordev->sent;
144 space = xordev->dst->buffer->start - xordev->sent;
145 if(space < 0 || (space == 0 && xordev->dst->buffer->fill == 0))
146 space = CDB_SIZE - xordev->sent;
147 send_now = min_t( int, ready, space);
148 if(send_now > 0) {
149 xordev->sent += send_now;
150 iowrite32(send_now, xordev->iomem + REG_COUNT);
151 iowrite32(IRQ_ON, xordev->iomem + REG_INTR_EN);
152 wake_up_interruptible(&xordev->wait_queue);
153 }
154 ldv_spin_unlock_irqrestore(&xordev->dst->buffer->lock, irq_flags2);
155 ldv_spin_unlock_irqrestore(&xordev->lock, irq_flags1);
156 }
157
158 static void irq_one_byte( struct xordev_device *xordev) {
159 int count = ioread32(xordev->iomem + REG_COUNT);
160 if(count > 0)
161 iowrite32(count - 1, xordev->iomem + REG_INTR_COUNT);
162 }
163
164 static ssize_t xordev_read( struct file *file, char *buffer, size_t count, loff_t *file_pos) {
165 struct char_device *chardev = ( struct char_device*)file->private_data;
166 struct xordev_device *xordev = chardev->xordev;
167 ssize_t result;
168 int ret_value;
169
170 ret_value = update_buffer(chardev);
171 if(ret_value < 0)
172 return ret_value;
173 mutex_lock(&chardev->mutex);
174 while(chardev->buffer->fill == 0) {
175 irq_one_byte(xordev);
176 mutex_unlock(&chardev->mutex);
177 if(wait_event_interruptible(xordev->wait_queue, chardev->buffer->fill > 0) < 0)
178 return -ERESTARTSYS;
179 mutex_lock(&chardev->mutex);
180 }
181 result = cdb_copy_from(chardev->buffer, ( void *)buffer, count, cdb_copy_to_user);
182 if(result == 0) { // this should never happen
183 mutex_unlock(&chardev->mutex);
184 return -EIO;
185 }
186 *file_pos = file->f_pos + result;
187 mutex_unlock(&chardev->mutex);
188 start_xoring(xordev);
189
190 return result;
191 }
192
193 static ssize_t xordev_write( struct file *file, const char *buffer, size_t count, loff_t *file_pos) {
194 struct char_device *chardev = ( struct char_device*)file->private_data;
195 struct xordev_device *xordev = chardev->xordev;
196 ssize_t result;
197 int ret_value;
198 unsigned long irq_flags;
199
200 ret_value = update_buffer(chardev);
201 if(ret_value < 0)
202 return ret_value;
203 mutex_lock(&chardev->mutex);
204 while(chardev->buffer->fill == CDB_SIZE) {
205 irq_one_byte(xordev);
206 mutex_unlock(&chardev->mutex);
207 if(wait_event_interruptible(xordev->wait_queue, chardev->buffer->fill < CDB_SIZE) < 0)
208 return -ERESTARTSYS;
209 mutex_lock(&chardev->mutex);
210 }
211 result = cdb_copy_to(( void *)buffer, chardev->buffer, count, cdb_copy_from_user);
212 if(result == 0) { // this should never happen
213 mutex_unlock(&chardev->mutex);
214 return -ENOSPC;
215 }
216 *file_pos = file->f_pos + result;
217 mutex_unlock(&chardev->mutex);
218 ldv_spin_lock_irqsave(&xordev->lock, irq_flags);
219 chardev->end += result;
220 ldv_spin_unlock_irqrestore(&xordev->lock, irq_flags);
221 start_xoring(xordev);
222
223 return result;
224 }
225
226 static struct file_operations xordev_dst_fops;
227 static struct file_operations xordev_input_fops; // defined below; forward declarations needed here
228
229 static int xordev_open( struct inode *inode, struct file *file) {
230 int minor = MINOR(inode->i_rdev);
231 struct xordev_device *xordev = &devices[GET_NUMBER(minor)];
232 struct char_device *chardev;
233 struct file_operations *file_ops;
234
235 switch(GET_TYPE(minor)) {
236 case TYPE_SRC1:
237 file_ops = &xordev_input_fops;
238 chardev = xordev->src1;
239 break;
240 case TYPE_SRC2:
241 file_ops = &xordev_input_fops;
242 chardev = xordev->src2;
243 break;
244 case TYPE_DST:
245 file_ops = &xordev_dst_fops;
246 chardev = xordev->dst;
247 break;
248 default: // this should never happen
249 return -EINVAL;
250 }
251
252 file->f_op = file_ops;
253 file->private_data = ( void *)chardev;
254
255 return 0;
256 }
257
258 static int xordev_release( struct inode *inode, struct file *file) {
259 return 0;
260 }
261
262 static struct file_operations xordev_fops = {
263 owner: THIS_MODULE,
264 open: xordev_open,
265 release: xordev_release
266 };
267
268 static struct file_operations xordev_dst_fops = {
269 owner: THIS_MODULE,
270 read: xordev_read,
271 open: xordev_open,
272 release: xordev_release
273 };
274
275 static struct file_operations xordev_input_fops = {
276 owner: THIS_MODULE,
277 write: xordev_write,
278 open: xordev_open,
279 release: xordev_release
280 };
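
Editor's note: the three initializers above use the old GCC "field: value" extension. The equivalent C99 designated-initializer form (a sketch, not part of the verified source) would be:

    static struct file_operations xordev_fops = {
            .owner   = THIS_MODULE,
            .open    = xordev_open,
            .release = xordev_release,
    };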
281
282 static struct char_device *chardev_alloc( struct xordev_device *xordev, int type) {
283 struct char_device *chardev = kmalloc( sizeof( struct char_device), GFP_KERNEL);
284 if(chardev == NULL)
285 return NULL;
286 chardev->node = MKDEV(xordev_major, XORDEV_MINOR(xordev->number, type));
287 chardev->xordev = xordev;
288 chardev->buffer = cdb_alloc(&xordev->pcidev->dev);
289 if(chardev->buffer == NULL) {
290 kfree(chardev); // avoid memory leak
291 return NULL;
292 }
293 mutex_init(&chardev->mutex);
294 chardev->end = 0;
295 return chardev;
296 }
297
298 static void chardev_free( struct char_device *chardev) {
299 cdb_free(chardev->buffer);
300 kfree(chardev);
301 }
302
303 static void init_reg_values( struct xordev_device *xordev) {
304 iowrite32( 0, xordev->iomem + REG_INTR_COUNT);
305 iowrite32(xordev->src1->buffer->handle, xordev->iomem + REG_SRC1);
306 iowrite32(xordev->src2->buffer->handle, xordev->iomem + REG_SRC2);
307 iowrite32(xordev->dst->buffer->handle, xordev->iomem + REG_DST);
308 }
309
310 static irqreturn_t xordev_irq_handler( int irq, void *device) {
311 struct xordev_device *xordev = ( struct xordev_device *)pci_get_drvdata(( struct pci_dev *)device);
312 int count, intr_count;
313 unsigned long irq_flags;
314
315 ldv_spin_lock_irqsave(&xordev->lock, irq_flags);
316 if(ioread32(xordev->iomem + REG_INTR_EN) != IRQ_ON) {
317 ldv_spin_unlock_irqrestore(&xordev->lock, irq_flags);
318 return IRQ_NONE;
319 }
320 count = ioread32(xordev->iomem + REG_COUNT);
321 intr_count = ioread32(xordev->iomem + REG_INTR_COUNT);
322 if(count > intr_count) {
323 ldv_spin_unlock_irqrestore(&xordev->lock, irq_flags);
324 return IRQ_NONE;
325 }
326
327 // handle interrupt:
328 update_buffer(xordev->src1);
329 update_buffer(xordev->src2);
330 update_buffer(xordev->dst);
331 if(count == 0) {
332 iowrite32(IRQ_OFF, xordev->iomem + REG_INTR_EN);
333 if(xordev->sent == CDB_SIZE) {
334 xordev->sent -= CDB_SIZE;
335 xordev->src1->end -= CDB_SIZE;
336 xordev->src2->end -= CDB_SIZE;
337 init_reg_values(xordev);
338 }
339 }
340 if(intr_count != 0)
341 iowrite32( 0, xordev->iomem + REG_INTR_COUNT);
342 wake_up_interruptible(&xordev->wait_queue);
343 ldv_spin_unlock_irqrestore(&xordev->lock, irq_flags);
344 start_xoring(xordev);
345
346 return IRQ_HANDLED;
347 }
348
349 static int xordev_probe( struct pci_dev *pcidev, const struct pci_device_id *id) {
350 int i, ret_value;
351 struct xordev_device *xordev;
352 struct device *node;
353
354 // look for first not used device:
355 for(i = 0; i < MAX_DEVICES; ++i)
356 if(!devices[i].present)
357 break;
358 if(i == MAX_DEVICES)
359 return -ENOMEM;
360 xordev = &devices[i];
361
362 // enable device, map region:
363 xordev->pcidev = pcidev;
364 ret_value = pci_enable_device(pcidev);
365 if(ret_value < 0)
366 return ret_value;
367 ret_value = pci_request_regions(pcidev, NAME_PCI);
368 if(ret_value < 0)
369 return ret_value;
370 xordev->iomem = pci_iomap(pcidev, 0, 0);
371 if(xordev->iomem == NULL)
372 return -EIO;
373
374 // enable DMA:
375 pci_set_master(pcidev);
376 ret_value = pci_set_dma_mask(pcidev, DMA_BIT_MASK( 32));
377 if(ret_value < 0)
378 return ret_value;
379 ret_value = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK( 32));
380 if(ret_value < 0)
381 return ret_value;
382
383 // allocate other resources:
384 xordev->src1 = chardev_alloc(xordev, TYPE_SRC1);
385 if(xordev->src1 == NULL)
386 return -ENOMEM;
387 xordev->src2 = chardev_alloc(xordev, TYPE_SRC2);
388 if(xordev->src2 == NULL)
389 return -ENOMEM;
390 xordev->dst = chardev_alloc(xordev, TYPE_DST);
391 if(xordev->dst == NULL)
392 return -ENOMEM;
393 init_waitqueue_head(&xordev->wait_queue);
394
395 // set initial values:
396 spin_lock_init(&xordev->lock);
397 xordev->sent = 0;
398 pci_set_drvdata(pcidev, ( void *)xordev);
399 init_reg_values(xordev);
400 iowrite32(IRQ_OFF, xordev->iomem + REG_INTR_EN);
401
402 // request interrupts:
403 ret_value = request_irq(pcidev->irq, xordev_irq_handler, IRQF_SHARED, NAME_PCI, ( void *)pcidev);
404 if(ret_value < 0)
405 return ret_value;
406
407 // create nodes in /dev:
408 node = device_create(xordev_class, NULL, xordev->src1->node, NULL, "xor%ds1", xordev->number);
409 if(IS_ERR(node))
410 return -EIO;
411 node = device_create(xordev_class, NULL, xordev->src2->node, NULL, "xor%ds2", xordev->number);
412 if(IS_ERR(node))
413 return -EIO;
414 node = device_create(xordev_class, NULL, xordev->dst->node, NULL, "xor%dd", xordev->number);
415 if(IS_ERR(node))
416 return -EIO;
417
418 xordev->present = 1;
419
420 return 0;
421 }
422
423 static void xordev_remove( struct pci_dev *pcidev) {
424 struct xordev_device *xordev = ( struct xordev_device *)pci_get_drvdata(pcidev);
425
426 xordev->present = false;
427
428 // remove nodes from /dev:
429 device_destroy(xordev_class, xordev->src1->node);
430 device_destroy(xordev_class, xordev->src2->node);
431 device_destroy(xordev_class, xordev->dst->node);
432
433 // release resources, disable device:
434 free_irq(pcidev->irq, ( void *)pcidev);
435 pci_iounmap(pcidev, xordev->iomem);
436 pci_release_regions(pcidev);
437 pci_disable_device(pcidev);
438 chardev_free(xordev->src1);
439 chardev_free(xordev->src2);
440 chardev_free(xordev->dst);
441 }
442
443 static struct pci_device_id xordev_id = {
444 PCI_DEVICE(ID_VENDOR, ID_DEVICE)
445 };
446
447 static struct pci_driver xordev_driver = {
448 name: NAME_PCI,
449 id_table: &xordev_id,
450 probe: xordev_probe,
451 remove: xordev_remove,
452 };
453
454 static int xordev_init_module( void) {
455 int i, ret_value;
456
457 // initialize array devices:
458 for(i = 0; i < MAX_DEVICES; ++i) {
459 devices[i].present = 0;
460 devices[i].number = i;
461 }
462
463 // register devices:
464 xordev_major = register_chrdev( 0, NAME_CHAR, &xordev_fops);
465 if(xordev_major < 0) {
466 ret_value = xordev_major;
467 goto err_register_chrdev;
468 }
469 xordev_class = class_create(THIS_MODULE, NAME_CLASS);
470 if(IS_ERR(xordev_class)) {
471 ret_value = -EIO;
472 goto err_class_create;
473 }
474 ret_value = pci_register_driver(&xordev_driver);
475 if(ret_value < 0)
476 goto err_register_driver;
477
478 return 0;
479
480 err_register_driver:
481 pci_unregister_driver(&xordev_driver);
482 err_class_create:
483 class_destroy(xordev_class);
484 err_register_chrdev:
485 unregister_chrdev(xordev_major, NAME_CHAR);
486 return ret_value;
487 }
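
Editor's note: the unwind labels above each run one step too many. A failed pci_register_driver() jumps to err_register_driver and calls pci_unregister_driver() on a driver that was never registered; a failed class_create() still reaches class_destroy(), which is harmless only because class_destroy() tolerates ERR_PTR values. A conventional unwind, sketched under the assumption that each label should undo only the steps that already succeeded:

    err_register_driver:                                /* pci_register_driver() failed */
            class_destroy(xordev_class);                /* undo class_create() */
    err_class_create:                                   /* class_create() failed */
            unregister_chrdev(xordev_major, NAME_CHAR); /* undo register_chrdev() */
    err_register_chrdev:                                /* register_chrdev() failed */
            return ret_value;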
488
489 static void xordev_cleanup_module( void) {
490 pci_unregister_driver(&xordev_driver);
491 class_destroy(xordev_class);
492 unregister_chrdev(xordev_major, NAME_CHAR);
493 }
494
495 module_init(xordev_init_module);
496 module_exit(xordev_cleanup_module);
497
498
499
500
501
502 /* LDV_COMMENT_BEGIN_MAIN */
503 #ifdef LDV_MAIN1_sequence_infinite_withcheck_stateful
504
505 /*###########################################################################*/
506
507 /*############## Driver Environment Generator 0.2 output ####################*/
508
509 /*###########################################################################*/
510
511
512
513 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Tests that all kernel resources are correctly released by the driver before the driver is unloaded. */
514 void ldv_check_final_state( void);
515
516 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
517 void ldv_check_return_value( int res);
518
519 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
520 void ldv_initialize( void);
521
523 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns an arbitrary integer value. */
523 int nondet_int( void);
524
525 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
526 int LDV_IN_INTERRUPT;
527
528 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
529 void ldv_main1_sequence_infinite_withcheck_stateful( void) {
530
531
532
533 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
534 /*============================= VARIABLE DECLARATION PART =============================*/
535 /** CALLBACK SECTION request_irq **/
536 /* content: static irqreturn_t xordev_irq_handler(int irq, void *device)*/
537 /* LDV_COMMENT_BEGIN_PREP */
538 #define MAX_DEVICES 64
539 #define XORDEV_MINOR(number, type) ((number) * TYPES + (type))
540 #define GET_NUMBER(minor) ((minor) / TYPES)
541 #define GET_TYPE(minor) ((minor) % TYPES)
542 #define NAME_CLASS "xordev_class"
543 #define NAME_PCI "xordev_pci"
544 #define NAME_CHAR "xordev_char"
545 #define ID_VENDOR 0x1af4
546 #define ID_DEVICE 0x10fd
547 /* LDV_COMMENT_END_PREP */
548 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "xordev_irq_handler" */
549 int var_xordev_irq_handler_12_p0;
550 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "xordev_irq_handler" */
551 void * var_xordev_irq_handler_12_p1;
552
553
554
555
556 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
557 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
558 /*============================= VARIABLE INITIALIZING PART =============================*/
559 LDV_IN_INTERRUPT= 1;
560
561
562
563
564 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
565 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
566 /*============================= FUNCTION CALL SECTION =============================*/
567 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
568 ldv_initialize();
569
570 /** INIT: init_type: ST_MODULE_INIT **/
571 /* content: static int xordev_init_module(void)*/
572 /* LDV_COMMENT_BEGIN_PREP */
573 #define MAX_DEVICES 64
574 #define XORDEV_MINOR(number, type) ((number) * TYPES + (type))
575 #define GET_NUMBER(minor) ((minor) / TYPES)
576 #define GET_TYPE(minor) ((minor) % TYPES)
577 #define NAME_CLASS "xordev_class"
578 #define NAME_PCI "xordev_pci"
579 #define NAME_CHAR "xordev_char"
580 #define ID_VENDOR 0x1af4
581 #define ID_DEVICE 0x10fd
582 /* LDV_COMMENT_END_PREP */
583 /* LDV_COMMENT_FUNCTION_CALL Kernel calls the driver init function after loading the driver into the kernel. This function is declared as "MODULE_INIT(function name)". */
584 if(xordev_init_module())
585 goto ldv_final;
586
587
588
589 while( nondet_int()
590 ) {
591
592 switch(nondet_int()) {
593
594 case 0: {
595
596 /** CALLBACK SECTION request_irq **/
597 LDV_IN_INTERRUPT= 2;
598
599 /* content: static irqreturn_t xordev_irq_handler(int irq, void *device)*/
600 /* LDV_COMMENT_BEGIN_PREP */
601 #define MAX_DEVICES 64
602 #define XORDEV_MINOR(number, type) ((number) * TYPES + (type))
603 #define GET_NUMBER(minor) ((minor) / TYPES)
604 #define GET_TYPE(minor) ((minor) % TYPES)
605 #define NAME_CLASS "xordev_class"
606 #define NAME_PCI "xordev_pci"
607 #define NAME_CHAR "xordev_char"
608 #define ID_VENDOR 0x1af4
609 #define ID_DEVICE 0x10fd
610 /* LDV_COMMENT_END_PREP */
611 /* LDV_COMMENT_FUNCTION_CALL */
612 xordev_irq_handler( var_xordev_irq_handler_12_p0, var_xordev_irq_handler_12_p1);
613 LDV_IN_INTERRUPT= 1;
614
615
616
617 }
618
619 break;
620 default: break;
621
622 }
623
624 }
625
626 ldv_module_exit:
627
628 /** INIT: init_type: ST_MODULE_EXIT **/
629 /* content: static void xordev_cleanup_module(void)*/
630 /* LDV_COMMENT_BEGIN_PREP */
631 #define MAX_DEVICES 64
632 #define XORDEV_MINOR(number, type) ((number) * TYPES + (type))
633 #define GET_NUMBER(minor) ((minor) / TYPES)
634 #define GET_TYPE(minor) ((minor) % TYPES)
635 #define NAME_CLASS "xordev_class"
636 #define NAME_PCI "xordev_pci"
637 #define NAME_CHAR "xordev_char"
638 #define ID_VENDOR 0x1af4
639 #define ID_DEVICE 0x10fd
640 /* LDV_COMMENT_END_PREP */
641 /* LDV_COMMENT_FUNCTION_CALL Kernel calls the driver release function before the driver is unloaded from the kernel. This function is declared as "MODULE_EXIT(function name)". */
642 xordev_cleanup_module();
643
644 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver is unloaded. */
645 ldv_final: ldv_check_final_state();
646
647 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
648 return;
649
650 }
651 #endif
652
653 /* LDV_COMMENT_END_MAIN */
1 #ifndef _LINUX_ERR_H
2 #define _LINUX_ERR_H
3
4 #include <linux/compiler.h>
5
6 #include <asm/errno.h>
7
8 /*
9 * Kernel pointers have redundant information, so we can use a
10 * scheme where we can return either an error code or a dentry
11 * pointer with the same return value.
12 *
13 * This should be a per-architecture thing, to allow different
14 * error and pointer decisions.
15 */
16 #define MAX_ERRNO 4095
17
18 #ifndef __ASSEMBLY__
19
20 #define IS_ERR_VALUE(x) unlikely((x) >= ( unsigned long)-MAX_ERRNO)
21
22 static inline void * __must_check ERR_PTR( long error)
23 {
24 return ( void *) error;
25 }
26
27 static inline long __must_check PTR_ERR( const void *ptr)
28 {
29 return ( long) ptr;
30 }
31
32 static inline long __must_check IS_ERR( const void *ptr)
33 {
34 return IS_ERR_VALUE(( unsigned long)ptr);
35 }
36
37 static inline long __must_check IS_ERR_OR_NULL( const void *ptr)
38 {
39 return !ptr || IS_ERR_VALUE(( unsigned long)ptr);
40 }
41
42 /**
43 * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
44 * @ptr: The pointer to cast.
45 *
46 * Explicitly cast an error-valued pointer to another pointer type in such a
47 * way as to make it clear that's what's going on.
48 */
49 static inline void * __must_check ERR_CAST( const void *ptr)
50 {
51 /* cast away the const */
52 return ( void *) ptr;
53 }
54
55 static inline int __must_check PTR_RET( const void *ptr)
56 {
57 if (IS_ERR(ptr))
58 return PTR_ERR(ptr);
59 else
60 return 0;
61 }
62
63 #endif
64
65 #endif /* _LINUX_ERR_H */
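
Editor's note: a short usage sketch of this error-pointer scheme (alloc_foo() is a hypothetical example function):

    struct foo *f = alloc_foo();        /* may return ERR_PTR(-ENOMEM) instead of NULL */
    if (IS_ERR(f))
            return PTR_ERR(f);          /* recovers the encoded errno, e.g. -ENOMEM */
    /* f is a valid pointer from here on */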
1 #ifndef _LINUX_FS_H
2 #define _LINUX_FS_H
3
4 /*
5 * This file has definitions for some important file table
6 * structures etc.
7 */
8
9 #include <linux/limits.h>
10 #include <linux/ioctl.h>
11 #include <linux/blk_types.h>
12 #include <linux/types.h>
13
14 /*
15 * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
16 * the file limit at runtime and only root can increase the per-process
17 * nr_file rlimit, so it's safe to set up a ridiculously high absolute
18 * upper limit on files-per-process.
19 *
20 * Some programs (notably those using select()) may have to be
21 * recompiled to take full advantage of the new limits..
22 */
23
24 /* Fixed constants first: */
25 #undef NR_OPEN
26 #define INR_OPEN_CUR 1024 /* Initial setting for nfile rlimits */
27 #define INR_OPEN_MAX 4096 /* Hard limit for nfile rlimits */
28
29 #define BLOCK_SIZE_BITS 10
30 #define BLOCK_SIZE ( 1<<BLOCK_SIZE_BITS)
31
32 #define SEEK_SET 0 /* seek relative to beginning of file */
33 #define SEEK_CUR 1 /* seek relative to current file position */
34 #define SEEK_END 2 /* seek relative to end of file */
35 #define SEEK_MAX SEEK_END
36
37 struct fstrim_range {
38 __u64 start;
39 __u64 len;
40 __u64 minlen;
41 };
42
43 /* And dynamically-tunable limits and defaults: */
44 struct files_stat_struct {
45 unsigned long nr_files; /* read only */
46 unsigned long nr_free_files; /* read only */
47 unsigned long max_files; /* tunable */
48 };
49
50 struct inodes_stat_t {
51 int nr_inodes;
52 int nr_unused;
53 int dummy[ 5]; /* padding for sysctl ABI compatibility */
54 };
55
56
57 #define NR_FILE 8192 /* this can well be larger on a larger system */
58
59 #define MAY_EXEC 1
60 #define MAY_WRITE 2
61 #define MAY_READ 4
62 #define MAY_APPEND 8
63 #define MAY_ACCESS 16
64 #define MAY_OPEN 32
65 #define MAY_CHDIR 64
66
67 /*
68 * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond
69 * to O_WRONLY and O_RDWR via the strange trick in __dentry_open()
70 */
71
72 /* file is open for reading */
73 #define FMODE_READ ((__force fmode_t)0x1)
74 /* file is open for writing */
75 #define FMODE_WRITE ((__force fmode_t)0x2)
76 /* file is seekable */
77 #define FMODE_LSEEK ((__force fmode_t)0x4)
78 /* file can be accessed using pread */
79 #define FMODE_PREAD ((__force fmode_t)0x8)
80 /* file can be accessed using pwrite */
81 #define FMODE_PWRITE ((__force fmode_t)0x10)
82 /* File is opened for execution with sys_execve / sys_uselib */
83 #define FMODE_EXEC ((__force fmode_t)0x20)
84 /* File is opened with O_NDELAY (only set for block devices) */
85 #define FMODE_NDELAY ((__force fmode_t)0x40)
86 /* File is opened with O_EXCL (only set for block devices) */
87 #define FMODE_EXCL ((__force fmode_t)0x80)
88 /* File is opened using open(.., 3, ..) and is writeable only for ioctls
89 (specialy hack for floppy.c) */
90 #define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
91
92 /*
93 * Don't update ctime and mtime.
94 *
95 * Currently a special hack for the XFS open_by_handle ioctl, but we'll
96 * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
97 */
98 #define FMODE_NOCMTIME ((__force fmode_t)0x800)
99
100 /* Expect random access pattern */
101 #define FMODE_RANDOM ((__force fmode_t)0x1000)
102
103 /* File is huge (eg. /dev/kmem): treat loff_t as unsigned */
104 #define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
105
106 /* File is opened with O_PATH; almost nothing can be done with it */
107 #define FMODE_PATH ((__force fmode_t)0x4000)
108
109 /* File was opened by fanotify and shouldn't generate fanotify events */
110 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
111
112 /*
113 * The below are the various read and write types that we support. Some of
114 * them include behavioral modifiers that send information down to the
115 * block layer and IO scheduler. Terminology:
116 *
117 * The block layer uses device plugging to defer IO a little bit, in
118 * the hope that we will see more IO very shortly. This increases
119 * coalescing of adjacent IO and thus reduces the number of IOs we
120 * have to send to the device. It also allows for better queuing,
121 * if the IO isn't mergeable. If the caller is going to be waiting
122 * for the IO, then he must ensure that the device is unplugged so
123 * that the IO is dispatched to the driver.
124 *
125 * All IO is handled async in Linux. This is fine for background
126 * writes, but for reads or writes that someone waits for completion
127 * on, we want to notify the block layer and IO scheduler so that they
128 * know about it. That allows them to make better scheduling
129 * decisions. So when the below references 'sync' and 'async', it
130 * is referencing this priority hint.
131 *
132 * With that in mind, the available types are:
133 *
134 * READ A normal read operation. Device will be plugged.
135 * READ_SYNC A synchronous read. Device is not plugged, caller can
136 * immediately wait on this read without caring about
137 * unplugging.
138 * READA Used for read-ahead operations. Lower priority, and the
139 * block layer could (in theory) choose to ignore this
140 * request if it runs into resource problems.
141 * WRITE A normal async write. Device will be plugged.
142 * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down
143 * the hint that someone will be waiting on this IO
144 * shortly. The write equivalent of READ_SYNC.
145 * WRITE_ODIRECT Special case write for O_DIRECT only.
146 * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush.
147 * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on
148 * non-volatile media on completion.
149 * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded
150 * by a cache flush and data is guaranteed to be on
151 * non-volatile media on completion.
152 *
153 */
154 #define RW_MASK REQ_WRITE
155 #define RWA_MASK REQ_RAHEAD
156
157 #define READ 0
158 #define WRITE RW_MASK
159 #define READA RWA_MASK
160
161 #define READ_SYNC (READ | REQ_SYNC)
162 #define READ_META (READ | REQ_META)
163 #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
164 #define WRITE_ODIRECT (WRITE | REQ_SYNC)
165 #define WRITE_META (WRITE | REQ_META)
166 #define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
167 #define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
168 #define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
169
170 #define SEL_IN 1
171 #define SEL_OUT 2
172 #define SEL_EX 4
173
174 /* public flags for file_system_type */
175 #define FS_REQUIRES_DEV 1
176 #define FS_BINARY_MOUNTDATA 2
177 #define FS_HAS_SUBTYPE 4
178 #define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */
179 #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move()
180 * during rename() internally.
181 */
182
183 /*
184 * These are the fs-independent mount-flags: up to 32 flags are supported
185 */
186 #define MS_RDONLY 1 /* Mount read-only */
187 #define MS_NOSUID 2 /* Ignore suid and sgid bits */
188 #define MS_NODEV 4 /* Disallow access to device special files */
189 #define MS_NOEXEC 8 /* Disallow program execution */
190 #define MS_SYNCHRONOUS 16 /* Writes are synced at once */
191 #define MS_REMOUNT 32 /* Alter flags of a mounted FS */
192 #define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
193 #define MS_DIRSYNC 128 /* Directory modifications are synchronous */
194 #define MS_NOATIME 1024 /* Do not update access times. */
195 #define MS_NODIRATIME 2048 /* Do not update directory access times */
196 #define MS_BIND 4096
197 #define MS_MOVE 8192
198 #define MS_REC 16384
199 #define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
200 MS_VERBOSE is deprecated. */
201 #define MS_SILENT 32768
202 #define MS_POSIXACL ( 1<< 16) /* VFS does not apply the umask */
203 #define MS_UNBINDABLE ( 1<< 17) /* change to unbindable */
204 #define MS_PRIVATE ( 1<< 18) /* change to private */
205 #define MS_SLAVE ( 1<< 19) /* change to slave */
206 #define MS_SHARED ( 1<< 20) /* change to shared */
207 #define MS_RELATIME ( 1<< 21) /* Update atime relative to mtime/ctime. */
208 #define MS_KERNMOUNT ( 1<< 22) /* this is a kern_mount call */
209 #define MS_I_VERSION ( 1<< 23) /* Update inode I_version field */
210 #define MS_STRICTATIME ( 1<< 24) /* Always perform atime updates */
211 #define MS_NOSEC ( 1<< 28)
212 #define MS_BORN ( 1<< 29)
213 #define MS_ACTIVE ( 1<< 30)
214 #define MS_NOUSER ( 1<< 31)
215
216 /*
217 * Superblock flags that can be altered by MS_REMOUNT
218 */
219 #define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION)
220
221 /*
222 * Old magic mount flag and mask
223 */
224 #define MS_MGC_VAL 0xC0ED0000
225 #define MS_MGC_MSK 0xffff0000
226
227 /* Inode flags - they have nothing to superblock flags now */
228
229 #define S_SYNC 1 /* Writes are synced at once */
230 #define S_NOATIME 2 /* Do not update access times */
231 #define S_APPEND 4 /* Append-only file */
232 #define S_IMMUTABLE 8 /* Immutable file */
233 #define S_DEAD 16 /* removed, but still open directory */
234 #define S_NOQUOTA 32 /* Inode is not counted to quota */
235 #define S_DIRSYNC 64 /* Directory modifications are synchronous */
236 #define S_NOCMTIME 128 /* Do not update file c/mtime */
237 #define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */
238 #define S_PRIVATE 512 /* Inode is fs-internal */
239 #define S_IMA 1024 /* Inode has an associated IMA struct */
240 #define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
241 #define S_NOSEC 4096 /* no suid or xattr security attributes */
242
243 /*
244 * Note that nosuid etc flags are inode-specific: setting some file-system
245 * flags just means all the inodes inherit those flags by default. It might be
246 * possible to override it selectively if you really wanted to with some
247 * ioctl() that is not currently implemented.
248 *
249 * Exception: MS_RDONLY is always applied to the entire file system.
250 *
251 * Unfortunately, it is possible to change a filesystem's flags while it is
252 * mounted and files are in use. This means that all of the inodes will not have
253 * their i_flags updated. Hence, i_flags no longer inherit the superblock mount
254 * flags, so these have to be checked separately. -- rmk@arm.uk.linux.org
255 */
256 #define __IS_FLG(inode,flg) ((inode)->i_sb->s_flags & (flg))
257
258 #define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY)
259 #define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \
260 ((inode)->i_flags & S_SYNC))
261 #define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
262 ((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
263 #define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK)
264 #define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
265 #define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION)
266
267 #define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
268 #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
269 #define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
270 #define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL)
271
272 #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
273 #define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
274 #define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
275 #define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
276 #define IS_IMA(inode) ((inode)->i_flags & S_IMA)
277 #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
278 #define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
279
280 /* the read-only stuff doesn't really belong here, but any other place is
281 probably as bad and I don't want to create yet another include file. */
282
283 #define BLKROSET _IO(0x12, 93) /* set device read-only (0 = read-write) */
284 #define BLKROGET _IO(0x12, 94) /* get read-only status (0 = read-write) */
285 #define BLKRRPART _IO(0x12, 95) /* re-read partition table */
286 #define BLKGETSIZE _IO(0x12, 96) /* return device size /512 (long *arg) */
287 #define BLKFLSBUF _IO(0x12, 97) /* flush buffer cache */
288 #define BLKRASET _IO(0x12, 98) /* set read ahead for block device */
289 #define BLKRAGET _IO(0x12, 99) /* get current read ahead setting */
290 #define BLKFRASET _IO(0x12, 100) /* set filesystem (mm/filemap.c) read-ahead */
291 #define BLKFRAGET _IO(0x12, 101) /* get filesystem (mm/filemap.c) read-ahead */
292 #define BLKSECTSET _IO(0x12, 102) /* set max sectors per request (ll_rw_blk.c) */
293 #define BLKSECTGET _IO(0x12, 103) /* get max sectors per request (ll_rw_blk.c) */
294 #define BLKSSZGET _IO(0x12, 104) /* get block device sector size */
295 #if 0
296 #define BLKPG _IO(0x12, 105) /* See blkpg.h */
297
298 /* Some people are morons. Do not use sizeof! */
299
300 #define BLKELVGET _IOR(0x12, 106,size_t) /* elevator get */
301 #define BLKELVSET _IOW(0x12, 107,size_t) /* elevator set */
302 /* This was here just to show that the number is taken -
303 probably all these _IO(0x12,*) ioctls should be moved to blkpg.h. */
304 #endif
305 /* A jump here: 108-111 have been used for various private purposes. */
306 #define BLKBSZGET _IOR(0x12, 112,size_t)
307 #define BLKBSZSET _IOW(0x12, 113,size_t)
308 #define BLKGETSIZE64 _IOR(0x12, 114,size_t) /* return device size in bytes (u64 *arg) */
309 #define BLKTRACESETUP _IOWR(0x12, 115, struct blk_user_trace_setup)
310 #define BLKTRACESTART _IO(0x12, 116)
311 #define BLKTRACESTOP _IO(0x12, 117)
312 #define BLKTRACETEARDOWN _IO(0x12, 118)
313 #define BLKDISCARD _IO(0x12, 119)
314 #define BLKIOMIN _IO(0x12, 120)
315 #define BLKIOOPT _IO(0x12, 121)
316 #define BLKALIGNOFF _IO(0x12, 122)
317 #define BLKPBSZGET _IO(0x12, 123)
318 #define BLKDISCARDZEROES _IO(0x12, 124)
319 #define BLKSECDISCARD _IO(0x12, 125)
320
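/*
 * Minimal userspace sketch for the block-device ioctls above
 * (hypothetical example: assumes a readable /dev/sda; error handling
 * trimmed; needs <fcntl.h>, <stdio.h>, <stdint.h>, <unistd.h> and
 * <sys/ioctl.h>).
 */
static void print_blkdev_info(void)
{
        uint64_t bytes;
        int ro;
        int fd = open("/dev/sda", O_RDONLY);

        ioctl(fd, BLKGETSIZE64, &bytes); /* device size in bytes */
        ioctl(fd, BLKROGET, &ro);        /* nonzero if set read-only */
        printf("%llu bytes, ro=%d\n", (unsigned long long)bytes, ro);
        close(fd);
}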
321 #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
322 #define FIBMAP _IO(0x00, 1) /* bmap access */
323 #define FIGETBSZ _IO(0x00, 2) /* get the block size used for bmap */
324 #define FIFREEZE _IOWR( 'X', 119, int) /* Freeze */
325 #define FITHAW _IOWR( 'X', 120, int) /* Thaw */
326 #define FITRIM _IOWR( 'X', 121, struct fstrim_range) /* Trim */
327
328 #define FS_IOC_GETFLAGS _IOR( 'f', 1, long)
329 #define FS_IOC_SETFLAGS _IOW( 'f', 2, long)
330 #define FS_IOC_GETVERSION _IOR( 'v', 1, long)
331 #define FS_IOC_SETVERSION _IOW( 'v', 2, long)
332 #define FS_IOC_FIEMAP _IOWR( 'f', 11, struct fiemap)
333 #define FS_IOC32_GETFLAGS _IOR( 'f', 1, int)
334 #define FS_IOC32_SETFLAGS _IOW( 'f', 2, int)
335 #define FS_IOC32_GETVERSION _IOR( 'v', 1, int)
336 #define FS_IOC32_SETVERSION _IOW( 'v', 2, int)
337
338 /*
339 * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
340 */
341 #define FS_SECRM_FL 0x00000001 /* Secure deletion */
342 #define FS_UNRM_FL 0x00000002 /* Undelete */
343 #define FS_COMPR_FL 0x00000004 /* Compress file */
344 #define FS_SYNC_FL 0x00000008 /* Synchronous updates */
345 #define FS_IMMUTABLE_FL 0x00000010 /* Immutable file */
346 #define FS_APPEND_FL 0x00000020 /* writes to file may only append */
347 #define FS_NODUMP_FL 0x00000040 /* do not dump file */
348 #define FS_NOATIME_FL 0x00000080 /* do not update atime */
349 /* Reserved for compression usage... */
350 #define FS_DIRTY_FL 0x00000100
351 #define FS_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */
352 #define FS_NOCOMP_FL 0x00000400 /* Don't compress */
353 #define FS_ECOMPR_FL 0x00000800 /* Compression error */
354 /* End compression flags --- maybe not all used */
355 #define FS_BTREE_FL 0x00001000 /* btree format dir */
356 #define FS_INDEX_FL 0x00001000 /* hash-indexed directory */
357 #define FS_IMAGIC_FL 0x00002000 /* AFS directory */
358 #define FS_JOURNAL_DATA_FL 0x00004000 /* Reserved for ext3 */
359 #define FS_NOTAIL_FL 0x00008000 /* file tail should not be merged */
360 #define FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
361 #define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
362 #define FS_EXTENT_FL 0x00080000 /* Extents */
363 #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */
364 #define FS_NOCOW_FL 0x00800000 /* Do not cow file */
365 #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */
366
367 #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
368 #define FS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
369
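/*
 * Sketch: toggling a per-inode flag from userspace, chattr-style
 * (hypothetical example; fd must be open on a filesystem that
 * implements these ioctls and the flag must be in
 * FS_FL_USER_MODIFIABLE; needs <sys/ioctl.h>).
 */
static int set_noatime_flag(int fd)
{
        long flags;

        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
                return -1;
        flags |= FS_NOATIME_FL;
        return ioctl(fd, FS_IOC_SETFLAGS, &flags);
}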
370
371 #define SYNC_FILE_RANGE_WAIT_BEFORE 1
372 #define SYNC_FILE_RANGE_WRITE 2
373 #define SYNC_FILE_RANGE_WAIT_AFTER 4
374
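/*
 * Sketch: combining the three phases gives a full write-and-wait of
 * [off, off + len) via sync_file_range(2) (userspace; needs
 * _GNU_SOURCE and <fcntl.h>).
 */
static int flush_range(int fd, off64_t off, off64_t len)
{
        return sync_file_range(fd, off, len,
                               SYNC_FILE_RANGE_WAIT_BEFORE |
                               SYNC_FILE_RANGE_WRITE |
                               SYNC_FILE_RANGE_WAIT_AFTER);
}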
375 #ifdef __KERNEL__
376
377 #include <linux/linkage.h>
378 #include <linux/wait.h>
379 #include <linux/types.h>
380 #include <linux/kdev_t.h>
381 #include <linux/dcache.h>
382 #include <linux/path.h>
383 #include <linux/stat.h>
384 #include <linux/cache.h>
385 #include <linux/list.h>
386 #include <linux/radix-tree.h>
387 #include <linux/prio_tree.h>
388 #include <linux/init.h>
389 #include <linux/pid.h>
390 #include <linux/mutex.h>
391 #include <linux/capability.h>
392 #include <linux/semaphore.h>
393 #include <linux/fiemap.h>
394 #include <linux/rculist_bl.h>
395
396 #include <asm/atomic.h>
397 #include <asm/byteorder.h>
398
399 struct export_operations;
400 struct hd_geometry;
401 struct iovec;
402 struct nameidata;
403 struct kiocb;
404 struct kobject;
405 struct pipe_inode_info;
406 struct poll_table_struct;
407 struct kstatfs;
408 struct vm_area_struct;
409 struct vfsmount;
410 struct cred;
411
412 extern void __init inode_init( void);
413 extern void __init inode_init_early( void);
414 extern void __init files_init( unsigned long);
415
416 extern struct files_stat_struct files_stat;
417 extern unsigned long get_max_files( void);
418 extern int sysctl_nr_open;
419 extern struct inodes_stat_t inodes_stat;
420 extern int leases_enable, lease_break_time;
421
422 struct buffer_head;
423 typedef int (get_block_t)( struct inode *inode, sector_t iblock,
424 struct buffer_head *bh_result, int create);
425 typedef void (dio_iodone_t)( struct kiocb *iocb, loff_t offset,
426 ssize_t bytes, void * private, int ret,
427 bool is_async);
428
429 /*
430 * Attribute flags. These should be or-ed together to figure out what
431 * has been changed!
432 */
433 #define ATTR_MODE ( 1 << 0)
434 #define ATTR_UID ( 1 << 1)
435 #define ATTR_GID ( 1 << 2)
436 #define ATTR_SIZE ( 1 << 3)
437 #define ATTR_ATIME ( 1 << 4)
438 #define ATTR_MTIME ( 1 << 5)
439 #define ATTR_CTIME ( 1 << 6)
440 #define ATTR_ATIME_SET ( 1 << 7)
441 #define ATTR_MTIME_SET ( 1 << 8)
442 #define ATTR_FORCE ( 1 << 9) /* Not a change, but force it */
443 #define ATTR_ATTR_FLAG ( 1 << 10)
444 #define ATTR_KILL_SUID ( 1 << 11)
445 #define ATTR_KILL_SGID ( 1 << 12)
446 #define ATTR_FILE ( 1 << 13)
447 #define ATTR_KILL_PRIV ( 1 << 14)
448 #define ATTR_OPEN ( 1 << 15) /* Truncating from open(O_TRUNC) */
449 #define ATTR_TIMES_SET ( 1 << 16)
450
451 /*
452 * This is the Inode Attributes structure, used for notify_change(). It
453 * uses the above definitions as flags, to know which values have changed.
454 * Also, in this manner, a Filesystem can look at only the values it cares
455 * about. Basically, these are the attributes that the VFS layer can
456 * request to change from the FS layer.
457 *
458 * Derek Atkins <warlord@MIT.EDU> 94-10-20
459 */
460 struct iattr {
461 unsigned int ia_valid;
462 umode_t ia_mode;
463 uid_t ia_uid;
464 gid_t ia_gid;
465 loff_t ia_size;
466 struct timespec ia_atime;
467 struct timespec ia_mtime;
468 struct timespec ia_ctime;
469
470 /*
471 * Not an attribute, but an auxiliary info for filesystems wanting to
472 * implement an ftruncate() like method. NOTE: filesystem should
473 * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
474 */
475 struct file *ia_file;
476 };
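/*
 * Sketch: describing a chmod-like change with struct iattr
 * (illustrative only; real callers hold i_mutex and go through
 * notify_change(), and chmod_sketch() is a hypothetical name).
 */
static int chmod_sketch(struct dentry *dentry, struct inode *inode)
{
        struct iattr attr = {
                .ia_valid = ATTR_MODE | ATTR_CTIME,
                .ia_mode  = (inode->i_mode & ~S_IALLUGO) | 0644,
                .ia_ctime = current_fs_time(inode->i_sb),
        };

        return notify_change(dentry, &attr);
}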
477
478 /*
479 * Includes for diskquotas.
480 */
481 #include <linux/quota.h>
482
483 /**
484 * enum positive_aop_returns - aop return codes with specific semantics
485 *
486 * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
487 * completed, that the page is still locked, and
488 * should be considered active. The VM uses this hint
489 * to return the page to the active list -- it won't
490 * be a candidate for writeback again in the near
491 * future. Other callers must be careful to unlock
492 * the page if they get this return. Returned by
493 * writepage();
494 *
495 * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
496 * unlocked it and the page might have been truncated.
497 * The caller should back up to acquiring a new page and
498 * trying again. The aop will be taking reasonable
499 * precautions not to livelock. If the caller held a page
500 * reference, it should drop it before retrying. Returned
501 * by readpage().
502 *
503 * address_space_operation functions return these large constants to indicate
504 * special semantics to the caller. These are much larger than the bytes in a
505 * page to allow for functions that return the number of bytes operated on in a
506 * given page.
507 */
508
509 enum positive_aop_returns {
510 AOP_WRITEPAGE_ACTIVATE = 0x80000,
511 AOP_TRUNCATED_PAGE = 0x80001,
512 };
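/*
 * Sketch of the AOP_TRUNCATED_PAGE retry protocol described above,
 * loosely modeled on the pagecache read path in mm/filemap.c
 * (read_one_page() is a hypothetical helper; it expects a locked
 * page, as ->readpage() requires).
 */
static int read_one_page(struct file *filp, struct page *page)
{
        struct address_space *mapping = page->mapping;
        pgoff_t index = page->index;
        int error;

retry:
        error = mapping->a_ops->readpage(filp, page);
        if (error == AOP_TRUNCATED_PAGE) {
                /* readpage() unlocked the page and it may have been
                 * truncated: drop our reference and start over */
                page_cache_release(page);
                page = find_or_create_page(mapping, index, GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                goto retry;
        }
        return error;
}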
513
514 #define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */
515 #define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */
516 #define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct
517 * helper code (eg buffer layer)
518 * to clear GFP_FS from alloc */
519
520 /*
521 * oh the beauties of C type declarations.
522 */
523 struct page;
524 struct address_space;
525 struct writeback_control;
526
527 struct iov_iter {
528 const struct iovec *iov;
529 unsigned long nr_segs;
530 size_t iov_offset;
531 size_t count;
532 };
533
534 size_t iov_iter_copy_from_user_atomic( struct page *page,
535 struct iov_iter *i, unsigned long offset, size_t bytes);
536 size_t iov_iter_copy_from_user( struct page *page,
537 struct iov_iter *i, unsigned long offset, size_t bytes);
538 void iov_iter_advance( struct iov_iter *i, size_t bytes);
539 int iov_iter_fault_in_readable( struct iov_iter *i, size_t bytes);
540 size_t iov_iter_single_seg_count( struct iov_iter *i);
541
542 static inline void iov_iter_init( struct iov_iter *i,
543 const struct iovec *iov, unsigned long nr_segs,
544 size_t count, size_t written)
545 {
546 i->iov = iov;
547 i->nr_segs = nr_segs;
548 i->iov_offset = 0;
549 i->count = count + written;
550
551 iov_iter_advance(i, written);
552 }
553
554 static inline size_t iov_iter_count( struct iov_iter *i)
555 {
556 return i->count;
557 }
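/*
 * Sketch: pulling user data into a page with the iov_iter helpers
 * above (copy_iov_into_page() is a hypothetical helper; len must not
 * exceed what fits in the page at the given offset).
 */
static size_t copy_iov_into_page(struct page *page, const struct iovec *iov,
                                 unsigned long nr_segs, size_t len)
{
        struct iov_iter i;
        size_t copied;

        iov_iter_init(&i, iov, nr_segs, len, 0);
        copied = iov_iter_copy_from_user(page, &i, 0, len);
        iov_iter_advance(&i, copied);
        return copied;
}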
558
559 /*
560 * "descriptor" for what we're up to with a read.
561 * This allows us to use the same read code yet
562 * have multiple different users of the data that
563 * we read from a file.
564 *
565 * The simplest case just copies the data to user
566 * mode.
567 */
568 typedef struct {
569 size_t written;
570 size_t count;
571 union {
572 char __user *buf;
573 void *data;
574 } arg;
575 int error;
576 } read_descriptor_t;
577
578 typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
579 unsigned long, unsigned long);
580
581 struct address_space_operations {
582 int (*writepage)( struct page *page, struct writeback_control *wbc);
583 int (*readpage)( struct file *, struct page *);
584
585 /* Write back some dirty pages from this mapping. */
586 int (*writepages)( struct address_space *, struct writeback_control *);
587
588 /* Set a page dirty. Return true if this dirtied it */
589 int (*set_page_dirty)( struct page *page);
590
591 int (*readpages)( struct file *filp, struct address_space *mapping,
592 struct list_head *pages, unsigned nr_pages);
593
594 int (*write_begin)( struct file *, struct address_space *mapping,
595 loff_t pos, unsigned len, unsigned flags,
596 struct page **pagep, void **fsdata);
597 int (*write_end)( struct file *, struct address_space *mapping,
598 loff_t pos, unsigned len, unsigned copied,
599 struct page *page, void *fsdata);
600
601 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
602 sector_t (*bmap)( struct address_space *, sector_t);
603 void (*invalidatepage) ( struct page *, unsigned long);
604 int (*releasepage) ( struct page *, gfp_t);
605 void (*freepage)( struct page *);
606 ssize_t (*direct_IO)( int, struct kiocb *, const struct iovec *iov,
607 loff_t offset, unsigned long nr_segs);
608 int (*get_xip_mem)( struct address_space *, pgoff_t, int,
609 void **, unsigned long *);
610 /* migrate the contents of a page to the specified target */
611 int (*migratepage) ( struct address_space *,
612 struct page *, struct page *);
613 int (*launder_page) ( struct page *);
614 int (*is_partially_uptodate) ( struct page *, read_descriptor_t *,
615 unsigned long);
616 int (*error_remove_page)( struct address_space *, struct page *);
617 };
618
619 extern const struct address_space_operations empty_aops;
620
621 /*
622 * pagecache_write_begin/pagecache_write_end must be used by general code
623 * to write into the pagecache.
624 */
625 int pagecache_write_begin( struct file *, struct address_space *mapping,
626 loff_t pos, unsigned len, unsigned flags,
627 struct page **pagep, void **fsdata);
628
629 int pagecache_write_end( struct file *, struct address_space *mapping,
630 loff_t pos, unsigned len, unsigned copied,
631 struct page *page, void *fsdata);
632
633 struct backing_dev_info;
634 struct address_space {
635 struct inode *host; /* owner: inode, block_device */
636 struct radix_tree_root page_tree; /* radix tree of all pages */
637 spinlock_t tree_lock; /* and lock protecting it */
638 unsigned int i_mmap_writable; /* count VM_SHARED mappings */
639 struct prio_tree_root i_mmap; /* tree of private and shared mappings */
640 struct list_head i_mmap_nonlinear; /*list VM_NONLINEAR mappings */
641 struct mutex i_mmap_mutex; /* protect tree, count, list */
642 /* Protected by tree_lock together with the radix tree */
643 unsigned long nrpages; /* number of total pages */
644 pgoff_t writeback_index; /* writeback starts here */
645 const struct address_space_operations *a_ops; /* methods */
646 unsigned long flags; /* error bits/gfp mask */
647 struct backing_dev_info *backing_dev_info; /* device readahead, etc */
648 spinlock_t private_lock; /* for use by the address_space */
649 struct list_head private_list; /* ditto */
650 struct address_space *assoc_mapping; /* ditto */
651 } __attribute__((aligned( sizeof( long))));
652 /*
653 * On most architectures that alignment is already the case; but
654 * must be enforced here for CRIS, to let the least significant bit
655 * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
656 */
657
658 struct block_device {
659 dev_t bd_dev; /* not a kdev_t - it's a search key */
660 int bd_openers;
661 struct inode * bd_inode; /* will die */
662 struct super_block * bd_super;
663 struct mutex bd_mutex; /* open/close mutex */
664 struct list_head bd_inodes;
665 void * bd_claiming;
666 void * bd_holder;
667 int bd_holders;
668 bool bd_write_holder;
669 #ifdef CONFIG_SYSFS
670 struct list_head bd_holder_disks;
671 #endif
672 struct block_device * bd_contains;
673 unsigned bd_block_size;
674 struct hd_struct * bd_part;
675 /* number of times partitions within this device have been opened. */
676 unsigned bd_part_count;
677 int bd_invalidated;
678 struct gendisk * bd_disk;
679 struct list_head bd_list;
680 /*
681 * Private data. You must have bd_claim'ed the block_device
682 * to use this. NOTE: bd_claim allows an owner to claim
683 * the same device multiple times; the owner must take special
684 * care not to mess up bd_private in that case.
685 */
686 unsigned long bd_private;
687
688 /* The counter of freeze processes */
689 int bd_fsfreeze_count;
690 /* Mutex for freeze */
691 struct mutex bd_fsfreeze_mutex;
692 };
693
694 /*
695 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
696 * radix trees
697 */
698 #define PAGECACHE_TAG_DIRTY 0
699 #define PAGECACHE_TAG_WRITEBACK 1
700 #define PAGECACHE_TAG_TOWRITE 2
701
702 int mapping_tagged( struct address_space *mapping, int tag);
703
704 /*
705 * Might pages of this file be mapped into userspace?
706 */
707 static inline int mapping_mapped( struct address_space *mapping)
708 {
709 return !prio_tree_empty(&mapping->i_mmap) ||
710 !list_empty(&mapping->i_mmap_nonlinear);
711 }
712
713 /*
714 * Might pages of this file have been modified in userspace?
715 * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
716 * marks a vma as VM_SHARED if it is shared and the file was opened for
717 * writing, i.e. the vma may be mprotect()ed writable even if it is now readonly.
718 */
719 static inline int mapping_writably_mapped( struct address_space *mapping)
720 {
721 return mapping->i_mmap_writable != 0;
722 }
723
724 /*
725 * Use sequence counter to get consistent i_size on 32-bit processors.
726 */
727 #if BITS_PER_LONG== 32 && defined(CONFIG_SMP)
728 #include <linux/seqlock.h>
729 #define __NEED_I_SIZE_ORDERED
730 #define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount)
731 #else
732 #define i_size_ordered_init(inode) do { } while ( 0)
733 #endif
734
735 struct posix_acl;
736 #define ACL_NOT_CACHED (( void *)(- 1))
737
738 struct inode {
739 /* RCU path lookup touches following: */
740 umode_t i_mode;
741 uid_t i_uid;
742 gid_t i_gid;
743 const struct inode_operations *i_op;
744 struct super_block *i_sb;
745
746 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
747 unsigned int i_flags;
748 unsigned long i_state;
749 #ifdef CONFIG_SECURITY
750 void *i_security;
751 #endif
752 struct mutex i_mutex;
753
754
755 unsigned long dirtied_when; /* jiffies of first dirtying */
756
757 struct hlist_node i_hash;
758 struct list_head i_wb_list; /* backing dev IO list */
759 struct list_head i_lru; /* inode LRU list */
760 struct list_head i_sb_list;
761 union {
762 struct list_head i_dentry;
763 struct rcu_head i_rcu;
764 };
765 unsigned long i_ino;
766 atomic_t i_count;
767 unsigned int i_nlink;
768 dev_t i_rdev;
769 unsigned int i_blkbits;
770 u64 i_version;
771 loff_t i_size;
772 #ifdef __NEED_I_SIZE_ORDERED
773 seqcount_t i_size_seqcount;
774 #endif
775 struct timespec i_atime;
776 struct timespec i_mtime;
777 struct timespec i_ctime;
778 blkcnt_t i_blocks;
779 unsigned short i_bytes;
780 struct rw_semaphore i_alloc_sem;
781 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
782 struct file_lock *i_flock;
783 struct address_space *i_mapping;
784 struct address_space i_data;
785 #ifdef CONFIG_QUOTA
786 struct dquot *i_dquot[MAXQUOTAS];
787 #endif
788 struct list_head i_devices;
789 union {
790 struct pipe_inode_info *i_pipe;
791 struct block_device *i_bdev;
792 struct cdev *i_cdev;
793 };
794
795 __u32 i_generation;
796
797 #ifdef CONFIG_FSNOTIFY
798 __u32 i_fsnotify_mask; /* all events this inode cares about */
799 struct hlist_head i_fsnotify_marks;
800 #endif
801
802 #ifdef CONFIG_IMA
803 atomic_t i_readcount; /* struct files open RO */
804 #endif
805 atomic_t i_writecount;
806 #ifdef CONFIG_FS_POSIX_ACL
807 struct posix_acl *i_acl;
808 struct posix_acl *i_default_acl;
809 #endif
810 void *i_private; /* fs or device private pointer */
811 };
812
813 static inline int inode_unhashed( struct inode *inode)
814 {
815 return hlist_unhashed(&inode->i_hash);
816 }
817
818 /*
819 * inode->i_mutex nesting subclasses for the lock validator:
820 *
821 * 0: the object of the current VFS operation
822 * 1: parent
823 * 2: child/target
824 * 3: quota file
825 *
826 * The locking order between these classes is
827 * parent -> child -> normal -> xattr -> quota
828 */
829 enum inode_i_mutex_lock_class
830 {
831 I_MUTEX_NORMAL,
832 I_MUTEX_PARENT,
833 I_MUTEX_CHILD,
834 I_MUTEX_XATTR,
835 I_MUTEX_QUOTA
836 };
837
838 /*
839 * NOTE: on a 32bit arch with a preemptable kernel and
840 * a UP compile, i_size_read/write must be atomic
841 * with respect to the local cpu (unlike with preempt disabled),
842 * but they don't need to be atomic with respect to other cpus as in
843 * true SMP (so they either need to locally disable irqs around
844 * the read, or, for example on x86, they can still be implemented as a
845 * cmpxchg8b without the need of the lock prefix). For SMP compiles
846 * and 64bit archs it makes no difference whether preempt is enabled or not.
847 */
848 static inline loff_t i_size_read( const struct inode *inode)
849 {
850 #if BITS_PER_LONG== 32 && defined(CONFIG_SMP)
851 loff_t i_size;
852 unsigned int seq;
853
854 do {
855 seq = read_seqcount_begin(&inode->i_size_seqcount);
856 i_size = inode->i_size;
857 } while (read_seqcount_retry(&inode->i_size_seqcount, seq));
858 return i_size;
859 #elif BITS_PER_LONG== 32 && defined(CONFIG_PREEMPT)
860 loff_t i_size;
861
862 preempt_disable();
863 i_size = inode->i_size;
864 preempt_enable();
865 return i_size;
866 #else
867 return inode->i_size;
868 #endif
869 }
870
871 /*
872 * NOTE: unlike i_size_read(), i_size_write() does need locking around it
873 * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount
874 * can be lost, resulting in subsequent i_size_read() calls spinning forever.
875 */
876 static inline void i_size_write( struct inode *inode, loff_t i_size)
877 {
878 #if BITS_PER_LONG== 32 && defined(CONFIG_SMP)
879 write_seqcount_begin(&inode->i_size_seqcount);
880 inode->i_size = i_size;
881 write_seqcount_end(&inode->i_size_seqcount);
882 #elif BITS_PER_LONG== 32 && defined(CONFIG_PREEMPT)
883 preempt_disable();
884 inode->i_size = i_size;
885 preempt_enable();
886 #else
887 inode->i_size = i_size;
888 #endif
889 }
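/*
 * Sketch: readers may use i_size_read() locklessly, while writers
 * serialize (normally on i_mutex) around i_size_write(), per the
 * notes above (extend_isize() is a hypothetical helper).
 */
static void extend_isize(struct inode *inode, loff_t new_size)
{
        mutex_lock(&inode->i_mutex);
        if (new_size > i_size_read(inode))
                i_size_write(inode, new_size);
        mutex_unlock(&inode->i_mutex);
}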
890
891 static inline unsigned iminor( const struct inode *inode)
892 {
893 return MINOR(inode->i_rdev);
894 }
895
896 static inline unsigned imajor( const struct inode *inode)
897 {
898 return MAJOR(inode->i_rdev);
899 }
900
901 extern struct block_device *I_BDEV( struct inode *inode);
902
903 struct fown_struct {
904 rwlock_t lock; /* protects pid, uid, euid fields */
905 struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
906 enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */
907 uid_t uid, euid; /* uid/euid of process setting the owner */
908 int signum; /* posix.1b rt signal to be delivered on IO */
909 };
910
911 /*
912 * Track a single file's readahead state
913 */
914 struct file_ra_state {
915 pgoff_t start; /* where readahead started */
916 unsigned int size; /* # of readahead pages */
917 unsigned int async_size; /* do asynchronous readahead when
918 there are only # of pages ahead */
919
920 unsigned int ra_pages; /* Maximum readahead window */
921 unsigned int mmap_miss; /* Cache miss stat for mmap accesses */
922 loff_t prev_pos; /* Cache last read() position */
923 };
924
925 /*
926 * Check if @index falls in the readahead windows.
927 */
928 static inline int ra_has_index( struct file_ra_state *ra, pgoff_t index)
929 {
930 return (index >= ra->start &&
931 index < ra->start + ra->size);
932 }
933
934 #define FILE_MNT_WRITE_TAKEN 1
935 #define FILE_MNT_WRITE_RELEASED 2
936
937 struct file {
938 /*
939 * fu_list becomes invalid after file_free is called and queued via
940 * fu_rcuhead for RCU freeing
941 */
942 union {
943 struct list_head fu_list;
944 struct rcu_head fu_rcuhead;
945 } f_u;
946 struct path f_path;
947 #define f_dentry f_path.dentry
948 #define f_vfsmnt f_path.mnt
949 const struct file_operations *f_op;
950 spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */
951 #ifdef CONFIG_SMP
952 int f_sb_list_cpu;
953 #endif
954 atomic_long_t f_count;
955 unsigned int f_flags;
956 fmode_t f_mode;
957 loff_t f_pos;
958 struct fown_struct f_owner;
959 const struct cred *f_cred;
960 struct file_ra_state f_ra;
961
962 u64 f_version;
963 #ifdef CONFIG_SECURITY
964 void *f_security;
965 #endif
966 /* needed for tty driver, and maybe others */
967 void *private_data;
968
969 #ifdef CONFIG_EPOLL
970 /* Used by fs/eventpoll.c to link all the hooks to this file */
971 struct list_head f_ep_links;
972 #endif /* #ifdef CONFIG_EPOLL */
973 struct address_space *f_mapping;
974 #ifdef CONFIG_DEBUG_WRITECOUNT
975 unsigned long f_mnt_write_state;
976 #endif
977 };
978
979 struct file_handle {
980 __u32 handle_bytes;
981 int handle_type;
982 /* file identifier */
983 unsigned char f_handle[ 0];
984 };
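/*
 * Userspace sketch: filling a struct file_handle with
 * name_to_handle_at(2) (assumes Linux >= 2.6.39 with a glibc that
 * wraps the syscall; needs _GNU_SOURCE, <fcntl.h> and <stdlib.h>).
 */
static struct file_handle *get_handle(const char *path, int *mount_id)
{
        struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);

        if (!fh)
                return NULL;
        fh->handle_bytes = MAX_HANDLE_SZ;
        if (name_to_handle_at(AT_FDCWD, path, fh, mount_id, 0) < 0) {
                free(fh);
                return NULL;
        }
        return fh;
}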
985
986 #define get_file(x) atomic_long_inc(&(x)->f_count)
987 #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, - 1, 1)
988 #define file_count(x) atomic_long_read(&(x)->f_count)
989
990 #ifdef CONFIG_DEBUG_WRITECOUNT
991 static inline void file_take_write( struct file *f)
992 {
993 WARN_ON(f->f_mnt_write_state != 0);
994 f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN;
995 }
996 static inline void file_release_write( struct file *f)
997 {
998 f->f_mnt_write_state |= FILE_MNT_WRITE_RELEASED;
999 }
1000 static inline void file_reset_write( struct file *f)
1001 {
1002 f->f_mnt_write_state = 0;
1003 }
1004 static inline void file_check_state( struct file *f)
1005 {
1006 /*
1007 * At this point, either both or neither of these bits
1008 * should be set.
1009 */
1010 WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN);
1011 WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_RELEASED);
1012 }
1013 static inline int file_check_writeable( struct file *f)
1014 {
1015 if (f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN)
1016 return 0;
1017 printk(KERN_WARNING "writeable file with no "
1018 "mnt_want_write()\n");
1019 WARN_ON( 1);
1020 return -EINVAL;
1021 }
1022 #else /* !CONFIG_DEBUG_WRITECOUNT */
1023 static inline void file_take_write( struct file *filp) {}
1024 static inline void file_release_write( struct file *filp) {}
1025 static inline void file_reset_write( struct file *filp) {}
1026 static inline void file_check_state( struct file *filp) {}
1027 static inline int file_check_writeable( struct file *filp)
1028 {
1029 return 0;
1030 }
1031 #endif /* CONFIG_DEBUG_WRITECOUNT */
1032
1033 #define MAX_NON_LFS ((1UL<< 31) - 1)
1034
1035 /* Page cache limit. The filesystems should put that into their s_maxbytes
1036 limits, otherwise bad things can happen in VM. */
1037 #if BITS_PER_LONG== 32
1038 #define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG- 1))- 1)
1039 #elif BITS_PER_LONG== 64
1040 #define MAX_LFS_FILESIZE 0x7fffffffffffffffUL
1041 #endif
1042
1043 #define FL_POSIX 1
1044 #define FL_FLOCK 2
1045 #define FL_ACCESS 8 /* not trying to lock, just looking */
1046 #define FL_EXISTS 16 /* when unlocking, test for existence */
1047 #define FL_LEASE 32 /* lease held on this file */
1048 #define FL_CLOSE 64 /* unlock on close */
1049 #define FL_SLEEP 128 /* A blocking lock */
1050
1051 /*
1052 * Special return value from posix_lock_file() and vfs_lock_file() for
1053 * asynchronous locking.
1054 */
1055 #define FILE_LOCK_DEFERRED 1
1056
1057 /*
1058 * The POSIX file lock owner is determined by
1059 * the "struct files_struct" in the thread group
1060 * (or NULL for no owner - BSD locks).
1061 *
1062 * Lockd stuffs a "host" pointer into this.
1063 */
1064 typedef struct files_struct *fl_owner_t;
1065
1066 struct file_lock_operations {
1067 void (*fl_copy_lock)( struct file_lock *, struct file_lock *);
1068 void (*fl_release_private)( struct file_lock *);
1069 };
1070
1071 struct lock_manager_operations {
1072 int (*fl_compare_owner)( struct file_lock *, struct file_lock *);
1073 void (*fl_notify)( struct file_lock *); /* unblock callback */
1074 int (*fl_grant)( struct file_lock *, struct file_lock *, int);
1075 void (*fl_release_private)( struct file_lock *);
1076 void (*fl_break)( struct file_lock *);
1077 int (*fl_change)( struct file_lock **, int);
1078 };
1079
1080 struct lock_manager {
1081 struct list_head list;
1082 };
1083
1084 void locks_start_grace( struct lock_manager *);
1085 void locks_end_grace( struct lock_manager *);
1086 int locks_in_grace( void);
1087
1088 /* that will die - we need it for nfs_lock_info */
1089 #include <linux/nfs_fs_i.h>
1090
1091 struct file_lock {
1092 struct file_lock *fl_next; /* singly linked list for this inode */
1093 struct list_head fl_link; /* doubly linked list of all locks */
1094 struct list_head fl_block; /* circular list of blocked processes */
1095 fl_owner_t fl_owner;
1096 unsigned char fl_flags;
1097 unsigned char fl_type;
1098 unsigned int fl_pid;
1099 struct pid *fl_nspid;
1100 wait_queue_head_t fl_wait;
1101 struct file *fl_file;
1102 loff_t fl_start;
1103 loff_t fl_end;
1104
1105 struct fasync_struct * fl_fasync; /* for lease break notifications */
1106 unsigned long fl_break_time; /* for nonblocking lease breaks */
1107
1108 const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
1109 const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
1110 union {
1111 struct nfs_lock_info nfs_fl;
1112 struct nfs4_lock_info nfs4_fl;
1113 struct {
1114 struct list_head link; /* link in AFS vnode's pending_locks list */
1115 int state; /* state of grant or error if -ve */
1116 } afs;
1117 } fl_u;
1118 };
1119
1120 /* The following constant reflects the upper bound of the file/locking space */
1121 #ifndef OFFSET_MAX
1122 #define INT_LIMIT(x) (~((x)1 << ( sizeof(x)* 8 - 1)))
1123 #define OFFSET_MAX INT_LIMIT(loff_t)
1124 #define OFFT_OFFSET_MAX INT_LIMIT(off_t)
1125 #endif
1126
1127 #include <linux/fcntl.h>
1128
1129 extern void send_sigio( struct fown_struct *fown, int fd, int band);
1130
1131 #ifdef CONFIG_FILE_LOCKING
1132 extern int fcntl_getlk( struct file *, struct flock __user *);
1133 extern int fcntl_setlk( unsigned int, struct file *, unsigned int,
1134 struct flock __user *);
1135
1136 #if BITS_PER_LONG == 32
1137 extern int fcntl_getlk64( struct file *, struct flock64 __user *);
1138 extern int fcntl_setlk64( unsigned int, struct file *, unsigned int,
1139 struct flock64 __user *);
1140 #endif
1141
1142 extern int fcntl_setlease( unsigned int fd, struct file *filp, long arg);
1143 extern int fcntl_getlease( struct file *filp);
1144
1145 /* fs/locks.c */
1146 void locks_free_lock( struct file_lock *fl);
1147 extern void locks_init_lock( struct file_lock *);
1148 extern struct file_lock * locks_alloc_lock( void);
1149 extern void locks_copy_lock( struct file_lock *, struct file_lock *);
1150 extern void __locks_copy_lock( struct file_lock *, const struct file_lock *);
1151 extern void locks_remove_posix( struct file *, fl_owner_t);
1152 extern void locks_remove_flock( struct file *);
1153 extern void locks_release_private( struct file_lock *);
1154 extern void posix_test_lock( struct file *, struct file_lock *);
1155 extern int posix_lock_file( struct file *, struct file_lock *, struct file_lock *);
1156 extern int posix_lock_file_wait( struct file *, struct file_lock *);
1157 extern int posix_unblock_lock( struct file *, struct file_lock *);
1158 extern int vfs_test_lock( struct file *, struct file_lock *);
1159 extern int vfs_lock_file( struct file *, unsigned int, struct file_lock *, struct file_lock *);
1160 extern int vfs_cancel_lock( struct file *filp, struct file_lock *fl);
1161 extern int flock_lock_file_wait( struct file *filp, struct file_lock *fl);
1162 extern int __break_lease( struct inode *inode, unsigned int flags);
1163 extern void lease_get_mtime( struct inode *, struct timespec *time);
1164 extern int generic_setlease( struct file *, long, struct file_lock **);
1165 extern int vfs_setlease( struct file *, long, struct file_lock **);
1166 extern int lease_modify( struct file_lock **, int);
1167 extern int lock_may_read( struct inode *, loff_t start, unsigned long count);
1168 extern int lock_may_write( struct inode *, loff_t start, unsigned long count);
1169 extern void lock_flocks( void);
1170 extern void unlock_flocks( void);
1171 #else /* !CONFIG_FILE_LOCKING */
1172 static inline int fcntl_getlk( struct file *file, struct flock __user *user)
1173 {
1174 return -EINVAL;
1175 }
1176
1177 static inline int fcntl_setlk( unsigned int fd, struct file *file,
1178 unsigned int cmd, struct flock __user *user)
1179 {
1180 return -EACCES;
1181 }
1182
1183 #if BITS_PER_LONG == 32
1184 static inline int fcntl_getlk64( struct file *file, struct flock64 __user *user)
1185 {
1186 return -EINVAL;
1187 }
1188
1189 static inline int fcntl_setlk64( unsigned int fd, struct file *file,
1190 unsigned int cmd, struct flock64 __user *user)
1191 {
1192 return -EACCES;
1193 }
1194 #endif
1195 static inline int fcntl_setlease( unsigned int fd, struct file *filp, long arg)
1196 {
1197 return 0;
1198 }
1199
1200 static inline int fcntl_getlease( struct file *filp)
1201 {
1202 return 0;
1203 }
1204
1205 static inline void locks_init_lock( struct file_lock *fl)
1206 {
1207 return;
1208 }
1209
1210 static inline void __locks_copy_lock( struct file_lock * new, struct file_lock *fl)
1211 {
1212 return;
1213 }
1214
1215 static inline void locks_copy_lock( struct file_lock * new, struct file_lock *fl)
1216 {
1217 return;
1218 }
1219
1220 static inline void locks_remove_posix( struct file *filp, fl_owner_t owner)
1221 {
1222 return;
1223 }
1224
1225 static inline void locks_remove_flock( struct file *filp)
1226 {
1227 return;
1228 }
1229
1230 static inline void posix_test_lock( struct file *filp, struct file_lock *fl)
1231 {
1232 return;
1233 }
1234
1235 static inline int posix_lock_file( struct file *filp, struct file_lock *fl,
1236 struct file_lock *conflock)
1237 {
1238 return -ENOLCK;
1239 }
1240
1241 static inline int posix_lock_file_wait( struct file *filp, struct file_lock *fl)
1242 {
1243 return -ENOLCK;
1244 }
1245
1246 static inline int posix_unblock_lock( struct file *filp,
1247 struct file_lock *waiter)
1248 {
1249 return -ENOENT;
1250 }
1251
1252 static inline int vfs_test_lock( struct file *filp, struct file_lock *fl)
1253 {
1254 return 0;
1255 }
1256
1257 static inline int vfs_lock_file( struct file *filp, unsigned int cmd,
1258 struct file_lock *fl, struct file_lock *conf)
1259 {
1260 return -ENOLCK;
1261 }
1262
1263 static inline int vfs_cancel_lock( struct file *filp, struct file_lock *fl)
1264 {
1265 return 0;
1266 }
1267
1268 static inline int flock_lock_file_wait( struct file *filp,
1269 struct file_lock *request)
1270 {
1271 return -ENOLCK;
1272 }
1273
1274 static inline int __break_lease( struct inode *inode, unsigned int mode)
1275 {
1276 return 0;
1277 }
1278
1279 static inline void lease_get_mtime( struct inode *inode, struct timespec *time)
1280 {
1281 return;
1282 }
1283
1284 static inline int generic_setlease( struct file *filp, long arg,
1285 struct file_lock **flp)
1286 {
1287 return -EINVAL;
1288 }
1289
1290 static inline int vfs_setlease( struct file *filp, long arg,
1291 struct file_lock **lease)
1292 {
1293 return -EINVAL;
1294 }
1295
1296 static inline int lease_modify( struct file_lock **before, int arg)
1297 {
1298 return -EINVAL;
1299 }
1300
1301 static inline int lock_may_read( struct inode *inode, loff_t start,
1302 unsigned long len)
1303 {
1304 return 1;
1305 }
1306
1307 static inline int lock_may_write( struct inode *inode, loff_t start,
1308 unsigned long len)
1309 {
1310 return 1;
1311 }
1312
1313 static inline void lock_flocks( void)
1314 {
1315 }
1316
1317 static inline void unlock_flocks( void)
1318 {
1319 }
1320
1321 #endif /* !CONFIG_FILE_LOCKING */
1322
1323
1324 struct fasync_struct {
1325 spinlock_t fa_lock;
1326 int magic;
1327 int fa_fd;
1328 struct fasync_struct *fa_next; /* singly linked list */
1329 struct file *fa_file;
1330 struct rcu_head fa_rcu;
1331 };
1332
1333 #define FASYNC_MAGIC 0x4601
1334
1335 /* SMP safe fasync helpers: */
1336 extern int fasync_helper( int, struct file *, int, struct fasync_struct **);
1337 extern struct fasync_struct *fasync_insert_entry( int, struct file *, struct fasync_struct **, struct fasync_struct *);
1338 extern int fasync_remove_entry( struct file *, struct fasync_struct **);
1339 extern struct fasync_struct *fasync_alloc( void);
1340 extern void fasync_free( struct fasync_struct *);
1341
1342 /* can be called from interrupts */
1343 extern void kill_fasync( struct fasync_struct **, int, int);
1344
1345 extern int __f_setown( struct file *filp, struct pid *, enum pid_type, int force);
1346 extern int f_setown( struct file *filp, unsigned long arg, int force);
1347 extern void f_delown( struct file *filp);
1348 extern pid_t f_getown( struct file *filp);
1349 extern int send_sigurg( struct fown_struct *fown);
1350
1351 /*
1352 * Umount options
1353 */
1354
1355 #define MNT_FORCE 0x00000001 /* Attempt to forcibly umount */
1356 #define MNT_DETACH 0x00000002 /* Just detach from the tree */
1357 #define MNT_EXPIRE 0x00000004 /* Mark for expiry */
1358 #define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
1359 #define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
1360
1361 extern struct list_head super_blocks;
1362 extern spinlock_t sb_lock;
1363
1364 struct super_block {
1365 struct list_head s_list; /* Keep this first */
1366 dev_t s_dev; /* search index; _not_ kdev_t */
1367 unsigned char s_dirt;
1368 unsigned char s_blocksize_bits;
1369 unsigned long s_blocksize;
1370 loff_t s_maxbytes; /* Max file size */
1371 struct file_system_type *s_type;
1372 const struct super_operations *s_op;
1373 const struct dquot_operations *dq_op;
1374 const struct quotactl_ops *s_qcop;
1375 const struct export_operations *s_export_op;
1376 unsigned long s_flags;
1377 unsigned long s_magic;
1378 struct dentry *s_root;
1379 struct rw_semaphore s_umount;
1380 struct mutex s_lock;
1381 int s_count;
1382 atomic_t s_active;
1383 #ifdef CONFIG_SECURITY
1384 void *s_security;
1385 #endif
1386 const struct xattr_handler **s_xattr;
1387
1388 struct list_head s_inodes; /* all inodes */
1389 struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
1390 #ifdef CONFIG_SMP
1391 struct list_head __percpu *s_files;
1392 #else
1393 struct list_head s_files;
1394 #endif
1395 /* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */
1396 struct list_head s_dentry_lru; /* unused dentry lru */
1397 int s_nr_dentry_unused; /* # of dentry on lru */
1398
1399 struct block_device *s_bdev;
1400 struct backing_dev_info *s_bdi;
1401 struct mtd_info *s_mtd;
1402 struct list_head s_instances;
1403 struct quota_info s_dquot; /* Diskquota specific options */
1404
1405 int s_frozen;
1406 wait_queue_head_t s_wait_unfrozen;
1407
1408 char s_id[ 32]; /* Informational name */
1409 u8 s_uuid[ 16]; /* UUID */
1410
1411 void *s_fs_info; /* Filesystem private info */
1412 fmode_t s_mode;
1413
1414 /* Granularity of c/m/atime in ns.
1415 Cannot be worse than a second */
1416 u32 s_time_gran;
1417
1418 /*
1419 * The next field is for VFS *only*. No filesystems have any business
1420 * even looking at it. You had been warned.
1421 */
1422 struct mutex s_vfs_rename_mutex; /* Kludge */
1423
1424 /*
1425 * Filesystem subtype. If non-empty the filesystem type field
1426 * in /proc/mounts will be "type.subtype"
1427 */
1428 char *s_subtype;
1429
1430 /*
1431 * Saved mount options for lazy filesystems using
1432 * generic_show_options()
1433 */
1434 char __rcu *s_options;
1435 const struct dentry_operations *s_d_op; /* default d_op for dentries */
1436
1437 /*
1438 * Saved pool identifier for cleancache (-1 means none)
1439 */
1440 int cleancache_poolid;
1441 };
1442
1443 extern struct timespec current_fs_time( struct super_block *sb);
1444
1445 /*
1446 * Snapshotting support.
1447 */
1448 enum {
1449 SB_UNFROZEN = 0,
1450 SB_FREEZE_WRITE = 1,
1451 SB_FREEZE_TRANS = 2,
1452 };
1453
1454 #define vfs_check_frozen(sb, level) \
1455 wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))
1456
1457 #define get_fs_excl() atomic_inc(&current->fs_excl)
1458 #define put_fs_excl() atomic_dec(&current->fs_excl)
1459 #define has_fs_excl() atomic_read(&current->fs_excl)
1460
1461 /*
1462 * until VFS tracks user namespaces for inodes, just make all files
1463 * belong to init_user_ns
1464 */
1465 extern struct user_namespace init_user_ns;
1466 #define inode_userns(inode) (&init_user_ns)
1467 extern bool inode_owner_or_capable( const struct inode *inode);
1468
1469 /* not quite ready to be deprecated, but... */
1470 extern void lock_super( struct super_block *);
1471 extern void unlock_super( struct super_block *);
1472
1473 /*
1474 * VFS helper functions..
1475 */
1476 extern int vfs_create( struct inode *, struct dentry *, int, struct nameidata *);
1477 extern int vfs_mkdir( struct inode *, struct dentry *, int);
1478 extern int vfs_mknod( struct inode *, struct dentry *, int, dev_t);
1479 extern int vfs_symlink( struct inode *, struct dentry *, const char *);
1480 extern int vfs_link( struct dentry *, struct inode *, struct dentry *);
1481 extern int vfs_rmdir( struct inode *, struct dentry *);
1482 extern int vfs_unlink( struct inode *, struct dentry *);
1483 extern int vfs_rename( struct inode *, struct dentry *, struct inode *, struct dentry *);
1484
1485 /*
1486 * VFS dentry helper functions.
1487 */
1488 extern void dentry_unhash( struct dentry *dentry);
1489
1490 /*
1491 * VFS file helper functions.
1492 */
1493 extern int file_permission( struct file *, int);
1494 extern void inode_init_owner( struct inode *inode, const struct inode *dir,
1495 mode_t mode);
1496 /*
1497 * VFS FS_IOC_FIEMAP helper definitions.
1498 */
1499 struct fiemap_extent_info {
1500 unsigned int fi_flags; /* Flags as passed from user */
1501 unsigned int fi_extents_mapped; /* Number of mapped extents */
1502 unsigned int fi_extents_max; /* Size of fiemap_extent array */
1503 struct fiemap_extent __user *fi_extents_start; /* Start of
1504 fiemap_extent array */
1505 };
1506 int fiemap_fill_next_extent( struct fiemap_extent_info *info, u64 logical,
1507 u64 phys, u64 len, u32 flags);
1508 int fiemap_check_flags( struct fiemap_extent_info *fieinfo, u32 fs_flags);
1509
1510 /*
1511 * File types
1512 *
1513 * NOTE! These match bits 12..15 of stat.st_mode
1514 * (ie "(i_mode >> 12) & 15").
1515 */
1516 #define DT_UNKNOWN 0
1517 #define DT_FIFO 1
1518 #define DT_CHR 2
1519 #define DT_DIR 4
1520 #define DT_BLK 6
1521 #define DT_REG 8
1522 #define DT_LNK 10
1523 #define DT_SOCK 12
1524 #define DT_WHT 14
1525
1526 /*
1527 * This is the "filldir" function type, used by readdir() to let
1528 * the kernel specify what kind of dirent layout it wants to have.
1529 * This allows the kernel to read directories into kernel space or
1530 * to have different dirent layouts depending on the binary type.
1531 */
1532 typedef int (*filldir_t)( void *, const char *, int, loff_t, u64, unsigned);
1533 struct block_device_operations;
1534
1535 /* These macros are for out-of-kernel modules to test that
1536 * the kernel supports the unlocked_ioctl and compat_ioctl
1537 * fields in struct file_operations. */
1538 #define HAVE_COMPAT_IOCTL 1
1539 #define HAVE_UNLOCKED_IOCTL 1
1540
1541 /*
1542 * NOTE:
1543 * all file operations except setlease can be called without
1544 * the big kernel lock held in all filesystems.
1545 */
1546 struct file_operations {
1547 struct module *owner;
1548 loff_t (*llseek) ( struct file *, loff_t, int);
1549 ssize_t (*read) ( struct file *, char __user *, size_t, loff_t *);
1550 ssize_t (*write) ( struct file *, const char __user *, size_t, loff_t *);
1551 ssize_t (*aio_read) ( struct kiocb *, const struct iovec *, unsigned long, loff_t);
1552 ssize_t (*aio_write) ( struct kiocb *, const struct iovec *, unsigned long, loff_t);
1553 int (*readdir) ( struct file *, void *, filldir_t);
1554 unsigned int (*poll) ( struct file *, struct poll_table_struct *);
1555 long (*unlocked_ioctl) ( struct file *, unsigned int, unsigned long);
1556 long (*compat_ioctl) ( struct file *, unsigned int, unsigned long);
1557 int (*mmap) ( struct file *, struct vm_area_struct *);
1558 int (*open) ( struct inode *, struct file *);
1559 int (*flush) ( struct file *, fl_owner_t id);
1560 int (*release) ( struct inode *, struct file *);
1561 int (*fsync) ( struct file *, int datasync);
1562 int (*aio_fsync) ( struct kiocb *, int datasync);
1563 int (*fasync) ( int, struct file *, int);
1564 int (*lock) ( struct file *, int, struct file_lock *);
1565 ssize_t (*sendpage) ( struct file *, struct page *, int, size_t, loff_t *, int);
1566 unsigned long (*get_unmapped_area)( struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1567 int (*check_flags)( int);
1568 int (*flock) ( struct file *, int, struct file_lock *);
1569 ssize_t (*splice_write)( struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
1570 ssize_t (*splice_read)( struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
1571 int (*setlease)( struct file *, long, struct file_lock **);
1572 long (*fallocate)( struct file *file, int mode, loff_t offset,
1573 loff_t len);
1574 };
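/*
 * Sketch: a driver fills in only the methods it implements and leaves
 * the rest NULL (the my_* names are hypothetical).
 */
static const struct file_operations my_fops = {
        .owner   = THIS_MODULE,
        .llseek  = no_llseek,
        .read    = my_read,
        .write   = my_write,
        .open    = my_open,
        .release = my_release,
};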
1575
1576 #define IPERM_FLAG_RCU 0x0001
1577
1578 struct inode_operations {
1579 struct dentry * (*lookup) ( struct inode *, struct dentry *, struct nameidata *);
1580 void * (*follow_link) ( struct dentry *, struct nameidata *);
1581 int (*permission) ( struct inode *, int, unsigned int);
1582 int (*check_acl)( struct inode *, int, unsigned int);
1583
1584 int (*readlink) ( struct dentry *, char __user *, int);
1585 void (*put_link) ( struct dentry *, struct nameidata *, void *);
1586
1587 int (*create) ( struct inode *, struct dentry *, int, struct nameidata *);
1588 int (*link) ( struct dentry *, struct inode *, struct dentry *);
1589 int (*unlink) ( struct inode *, struct dentry *);
1590 int (*symlink) ( struct inode *, struct dentry *, const char *);
1591 int (*mkdir) ( struct inode *, struct dentry *, int);
1592 int (*rmdir) ( struct inode *, struct dentry *);
1593 int (*mknod) ( struct inode *, struct dentry *, int,dev_t);
1594 int (*rename) ( struct inode *, struct dentry *,
1595 struct inode *, struct dentry *);
1596 void (*truncate) ( struct inode *);
1597 int (*setattr) ( struct dentry *, struct iattr *);
1598 int (*getattr) ( struct vfsmount *mnt, struct dentry *, struct kstat *);
1599 int (*setxattr) ( struct dentry *, const char *, const void *,size_t, int);
1600 ssize_t (*getxattr) ( struct dentry *, const char *, void *, size_t);
1601 ssize_t (*listxattr) ( struct dentry *, char *, size_t);
1602 int (*removexattr) ( struct dentry *, const char *);
1603 void (*truncate_range)( struct inode *, loff_t, loff_t);
1604 int (*fiemap)( struct inode *, struct fiemap_extent_info *, u64 start,
1605 u64 len);
1606 } ____cacheline_aligned;
1607
1608 struct seq_file;
1609
1610 ssize_t rw_copy_check_uvector( int type, const struct iovec __user * uvector,
1611 unsigned long nr_segs, unsigned long fast_segs,
1612 struct iovec *fast_pointer,
1613 struct iovec **ret_pointer);
1614
1615 extern ssize_t vfs_read( struct file *, char __user *, size_t, loff_t *);
1616 extern ssize_t vfs_write( struct file *, const char __user *, size_t, loff_t *);
1617 extern ssize_t vfs_readv( struct file *, const struct iovec __user *,
1618 unsigned long, loff_t *);
1619 extern ssize_t vfs_writev( struct file *, const struct iovec __user *,
1620 unsigned long, loff_t *);
1621
1622 struct super_operations {
1623 struct inode *(*alloc_inode)( struct super_block *sb);
1624 void (*destroy_inode)( struct inode *);
1625
1626 void (*dirty_inode) ( struct inode *, int flags);
1627 int (*write_inode) ( struct inode *, struct writeback_control *wbc);
1628 int (*drop_inode) ( struct inode *);
1629 void (*evict_inode) ( struct inode *);
1630 void (*put_super) ( struct super_block *);
1631 void (*write_super) ( struct super_block *);
1632 int (*sync_fs)( struct super_block *sb, int wait);
1633 int (*freeze_fs) ( struct super_block *);
1634 int (*unfreeze_fs) ( struct super_block *);
1635 int (*statfs) ( struct dentry *, struct kstatfs *);
1636 int (*remount_fs) ( struct super_block *, int *, char *);
1637 void (*umount_begin) ( struct super_block *);
1638
1639 int (*show_options)( struct seq_file *, struct vfsmount *);
1640 int (*show_devname)( struct seq_file *, struct vfsmount *);
1641 int (*show_path)( struct seq_file *, struct vfsmount *);
1642 int (*show_stats)( struct seq_file *, struct vfsmount *);
1643 #ifdef CONFIG_QUOTA
1644 ssize_t (*quota_read)( struct super_block *, int, char *, size_t, loff_t);
1645 ssize_t (*quota_write)( struct super_block *, int, const char *, size_t, loff_t);
1646 #endif
1647 int (*bdev_try_to_free_page)( struct super_block*, struct page*, gfp_t);
1648 };
1649
1650 /*
1651 * Inode state bits. Protected by inode->i_lock
1652 *
1653 * Three bits determine the dirty state of the inode, I_DIRTY_SYNC,
1654 * I_DIRTY_DATASYNC and I_DIRTY_PAGES.
1655 *
1656 * Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
1657 * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
1658 * various stages of removing an inode.
1659 *
1660 * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
1661 *
1662 * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
1663 * fdatasync(). i_atime is the usual cause.
1664 * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
1665 * these changes separately from I_DIRTY_SYNC so that we
1666 * don't have to write inode on fdatasync() when only
1667 * mtime has changed in it.
1668 * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
1669 * I_NEW Serves as both a mutex and completion notification.
1670 * New inodes set I_NEW. If two processes both create
1671 * the same inode, one of them will release its inode and
1672 * wait for I_NEW to be released before returning.
1673 * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
1674 * also cause waiting on I_NEW, without I_NEW actually
1675 * being set. find_inode() uses this to prevent returning
1676 * nearly-dead inodes.
1677 * I_WILL_FREE Must be set when calling write_inode_now() if i_count
1678 * is zero. I_FREEING must be set when I_WILL_FREE is
1679 * cleared.
1680 * I_FREEING Set when inode is about to be freed but still has dirty
1681 * pages or buffers attached or the inode itself is still
1682 * dirty.
1683 * I_CLEAR Added by end_writeback(). In this state the inode is clean
1684 * and can be destroyed. Inode keeps I_FREEING.
1685 *
1686 * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
1687 * prohibited for many purposes. iget() must wait for
1688 * the inode to be completely released, then create it
1689 * anew. Other functions will just ignore such inodes,
1690 * if appropriate. I_NEW is used for waiting.
1691 *
1692 * I_SYNC Synchronized write of dirty inode data. The bit is
1693 * set during data writeback, and cleared with a wakeup
1694 * on the bit address once it is done.
1695 *
1696 * Q: What is the difference between I_WILL_FREE and I_FREEING?
1697 */
1698 #define I_DIRTY_SYNC ( 1 << 0)
1699 #define I_DIRTY_DATASYNC ( 1 << 1)
1700 #define I_DIRTY_PAGES ( 1 << 2)
1701 #define __I_NEW 3
1702 #define I_NEW ( 1 << __I_NEW)
1703 #define I_WILL_FREE ( 1 << 4)
1704 #define I_FREEING ( 1 << 5)
1705 #define I_CLEAR ( 1 << 6)
1706 #define __I_SYNC 7
1707 #define I_SYNC ( 1 << __I_SYNC)
1708 #define I_REFERENCED ( 1 << 8)
1709
1710 #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
1711
1712 extern void __mark_inode_dirty( struct inode *, int);
1713 static inline void mark_inode_dirty( struct inode *inode)
1714 {
1715 __mark_inode_dirty(inode, I_DIRTY);
1716 }
1717
1718 static inline void mark_inode_dirty_sync( struct inode *inode)
1719 {
1720 __mark_inode_dirty(inode, I_DIRTY_SYNC);
1721 }
1722
1723 /**
1724 * inc_nlink - directly increment an inode's link count
1725 * @inode: inode
1726 *
1727 * This is a low-level filesystem helper to replace any
1728 * direct filesystem manipulation of i_nlink. Currently,
1729 * it is only here for parity with dec_nlink().
1730 */
1731 static inline void inc_nlink( struct inode *inode)
1732 {
1733 inode->i_nlink++;
1734 }
1735
1736 static inline void inode_inc_link_count( struct inode *inode)
1737 {
1738 inc_nlink(inode);
1739 mark_inode_dirty(inode);
1740 }
1741
1742 /**
1743 * drop_nlink - directly drop an inode's link count
1744 * @inode: inode
1745 *
1746 * This is a low-level filesystem helper to replace any
1747 * direct filesystem manipulation of i_nlink. In cases
1748 * where we are attempting to track writes to the
1749 * filesystem, a decrement to zero means an imminent
1750 * write when the file is truncated and actually unlinked
1751 * on the filesystem.
1752 */
1753 static inline void drop_nlink( struct inode *inode)
1754 {
1755 inode->i_nlink--;
1756 }
1757
1758 /**
1759 * clear_nlink - directly zero an inode's link count
1760 * @inode: inode
1761 *
1762 * This is a low-level filesystem helper to replace any
1763 * direct filesystem manipulation of i_nlink. See
1764 * drop_nlink() for why we care about i_nlink hitting zero.
1765 */
1766 static inline void clear_nlink( struct inode *inode)
1767 {
1768 inode->i_nlink = 0;
1769 }
1770
1771 static inline void inode_dec_link_count( struct inode *inode)
1772 {
1773 drop_nlink(inode);
1774 mark_inode_dirty(inode);
1775 }
1776
1777 /**
1778 * inode_inc_iversion - increments i_version
1779 * @inode: inode that needs to be updated
1780 *
1781 * Every time the inode is modified, the i_version field will be incremented.
1782 * The filesystem has to be mounted with the i_version flag.
1783 */
1784
1785 static inline void inode_inc_iversion( struct inode *inode)
1786 {
1787 spin_lock(&inode->i_lock);
1788 inode->i_version++;
1789 spin_unlock(&inode->i_lock);
1790 }
1791
1792 extern void touch_atime( struct vfsmount *mnt, struct dentry *dentry);
1793 static inline void file_accessed( struct file *file)
1794 {
1795 if (!(file->f_flags & O_NOATIME))
1796 touch_atime(file->f_path.mnt, file->f_path.dentry);
1797 }
1798
1799 int sync_inode( struct inode *inode, struct writeback_control *wbc);
1800 int sync_inode_metadata( struct inode *inode, int wait);
1801
1802 struct file_system_type {
1803 const char *name;
1804 int fs_flags;
1805 struct dentry *(*mount) ( struct file_system_type *, int,
1806 const char *, void *);
1807 void (*kill_sb) ( struct super_block *);
1808 struct module *owner;
1809 struct file_system_type * next;
1810 struct list_head fs_supers;
1811
1812 struct lock_class_key s_lock_key;
1813 struct lock_class_key s_umount_key;
1814 struct lock_class_key s_vfs_rename_key;
1815
1816 struct lock_class_key i_lock_key;
1817 struct lock_class_key i_mutex_key;
1818 struct lock_class_key i_mutex_dir_key;
1819 struct lock_class_key i_alloc_sem_key;
1820 };
1821
1822 extern struct dentry *mount_ns( struct file_system_type *fs_type, int flags,
1823 void *data, int (*fill_super)( struct super_block *, void *, int));
1824 extern struct dentry *mount_bdev( struct file_system_type *fs_type,
1825 int flags, const char *dev_name, void *data,
1826 int (*fill_super)( struct super_block *, void *, int));
1827 extern struct dentry *mount_single( struct file_system_type *fs_type,
1828 int flags, void *data,
1829 int (*fill_super)( struct super_block *, void *, int));
1830 extern struct dentry *mount_nodev( struct file_system_type *fs_type,
1831 int flags, void *data,
1832 int (*fill_super)( struct super_block *, void *, int));
1833 void generic_shutdown_super( struct super_block *sb);
1834 void kill_block_super( struct super_block *sb);
1835 void kill_anon_super( struct super_block *sb);
1836 void kill_litter_super( struct super_block *sb);
1837 void deactivate_super( struct super_block *sb);
1838 void deactivate_locked_super( struct super_block *sb);
1839 int set_anon_super( struct super_block *s, void *data);
1840 struct super_block *sget( struct file_system_type *type,
1841 int (*test)( struct super_block *, void *),
1842 int (*set)( struct super_block *, void *),
1843 void *data);
1844 extern struct dentry *mount_pseudo( struct file_system_type *, char *,
1845 const struct super_operations *ops,
1846 const struct dentry_operations *dops,
1847 unsigned long);
1848
1849 static inline void sb_mark_dirty( struct super_block *sb)
1850 {
1851 sb->s_dirt = 1;
1852 }
1853 static inline void sb_mark_clean( struct super_block *sb)
1854 {
1855 sb->s_dirt = 0;
1856 }
1857 static inline int sb_is_dirty( struct super_block *sb)
1858 {
1859 return sb->s_dirt;
1860 }
1861
1862 /* Alas, no aliases. Too much hassle with bringing module.h everywhere */
1863 #define fops_get(fops) \
1864 (((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
1865 #define fops_put(fops) \
1866 do { if (fops) module_put((fops)->owner); } while( 0)
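/*
 * Example (sketch): fops_get()/fops_put() pin the module that owns a
 * file_operations table, as the VFS does when opening a character
 * device (modeled loosely on fs/char_dev.c; the struct cdev argument
 * is assumed to have been looked up beforehand).
 */
#if 0
static int example_use_fops(struct cdev *cdev)
{
	const struct file_operations *fops;

	fops = fops_get(cdev->ops);	/* NULL if the owning module is gone */
	if (!fops)
		return -ENXIO;
	/* ... call through fops while the module reference is held ... */
	fops_put(fops);			/* drops the module reference */
	return 0;
}
#endif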
1867
1868 extern int register_filesystem( struct file_system_type *);
1869 extern int unregister_filesystem( struct file_system_type *);
1870 extern struct vfsmount *kern_mount_data( struct file_system_type *, void *data);
1871 #define kern_mount(type) kern_mount_data(type, NULL)
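/*
 * Example (sketch): declaring and registering a trivial in-memory
 * filesystem with the mount helpers above. examplefs_fill_super is a
 * hypothetical fill_super callback.
 */
#if 0
static struct dentry *examplefs_mount(struct file_system_type *fs_type,
				      int flags, const char *dev_name,
				      void *data)
{
	return mount_nodev(fs_type, flags, data, examplefs_fill_super);
}

static struct file_system_type examplefs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.mount		= examplefs_mount,
	.kill_sb	= kill_anon_super,
};

static int __init examplefs_init(void)
{
	return register_filesystem(&examplefs_type);
}

static void __exit examplefs_exit(void)
{
	unregister_filesystem(&examplefs_type);
}
#endif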
1872 extern int may_umount_tree( struct vfsmount *);
1873 extern int may_umount( struct vfsmount *);
1874 extern long do_mount( char *, char *, char *, unsigned long, void *);
1875 extern struct vfsmount *collect_mounts( struct path *);
1876 extern void drop_collected_mounts( struct vfsmount *);
1877 extern int iterate_mounts( int (*)( struct vfsmount *, void *), void *,
1878 struct vfsmount *);
1879 extern int vfs_statfs( struct path *, struct kstatfs *);
1880 extern int user_statfs( const char __user *, struct kstatfs *);
1881 extern int fd_statfs( int, struct kstatfs *);
1882 extern int statfs_by_dentry( struct dentry *, struct kstatfs *);
1883 extern int freeze_super( struct super_block *super);
1884 extern int thaw_super( struct super_block *super);
1885
1886 extern int current_umask( void);
1887
1888 /* /sys/fs */
1889 extern struct kobject *fs_kobj;
1890
1891 #define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK)
1892 extern int rw_verify_area( int, struct file *, loff_t *, size_t);
1893
1894 #define FLOCK_VERIFY_READ 1
1895 #define FLOCK_VERIFY_WRITE 2
1896
1897 #ifdef CONFIG_FILE_LOCKING
1898 extern int locks_mandatory_locked( struct inode *);
1899 extern int locks_mandatory_area( int, struct inode *, struct file *, loff_t, size_t);
1900
1901 /*
1902 * Candidates for mandatory locking have the setgid bit set
1903 * but no group execute bit - an otherwise meaningless combination.
1904 */
1905
1906 static inline int __mandatory_lock( struct inode *ino)
1907 {
1908 return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
1909 }
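/*
 * Example: a file with mode 02644 (setgid set, group-execute clear) is
 * a mandatory-locking candidate; mode 02754 is not, because S_IXGRP is
 * set and the setgid bit then keeps its usual meaning.
 */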
1910
1911 /*
1912 * ... and these candidates should be on an MS_MANDLOCK-mounted fs;
1913 * otherwise they will be advisory locks
1914 */
1915
1916 static inline int mandatory_lock( struct inode *ino)
1917 {
1918 return IS_MANDLOCK(ino) && __mandatory_lock(ino);
1919 }
1920
1921 static inline int locks_verify_locked( struct inode *inode)
1922 {
1923 if (mandatory_lock(inode))
1924 return locks_mandatory_locked(inode);
1925 return 0;
1926 }
1927
1928 static inline int locks_verify_truncate( struct inode *inode,
1929 struct file *filp,
1930 loff_t size)
1931 {
1932 if (inode->i_flock && mandatory_lock(inode))
1933 return locks_mandatory_area(
1934 FLOCK_VERIFY_WRITE, inode, filp,
1935 size < inode->i_size ? size : inode->i_size,
1936 (size < inode->i_size ? inode->i_size - size
1937 : size - inode->i_size)
1938 );
1939 return 0;
1940 }
1941
1942 static inline int break_lease( struct inode *inode, unsigned int mode)
1943 {
1944 if (inode->i_flock)
1945 return __break_lease(inode, mode);
1946 return 0;
1947 }
1948 #else /* !CONFIG_FILE_LOCKING */
1949 static inline int locks_mandatory_locked( struct inode *inode)
1950 {
1951 return 0;
1952 }
1953
1954 static inline int locks_mandatory_area( int rw, struct inode *inode,
1955 struct file *filp, loff_t offset,
1956 size_t count)
1957 {
1958 return 0;
1959 }
1960
1961 static inline int __mandatory_lock( struct inode *inode)
1962 {
1963 return 0;
1964 }
1965
1966 static inline int mandatory_lock( struct inode *inode)
1967 {
1968 return 0;
1969 }
1970
1971 static inline int locks_verify_locked( struct inode *inode)
1972 {
1973 return 0;
1974 }
1975
1976 static inline int locks_verify_truncate( struct inode *inode, struct file *filp,
1977 size_t size)
1978 {
1979 return 0;
1980 }
1981
1982 static inline int break_lease( struct inode *inode, unsigned int mode)
1983 {
1984 return 0;
1985 }
1986
1987 #endif /* CONFIG_FILE_LOCKING */
1988
1989 /* fs/open.c */
1990
1991 extern int do_truncate( struct dentry *, loff_t start, unsigned int time_attrs,
1992 struct file *filp);
1993 extern int do_fallocate( struct file *file, int mode, loff_t offset,
1994 loff_t len);
1995 extern long do_sys_open( int dfd, const char __user *filename, int flags,
1996 int mode);
1997 extern struct file *filp_open( const char *, int, int);
1998 extern struct file *file_open_root( struct dentry *, struct vfsmount *,
1999 const char *, int);
2000 extern struct file * dentry_open( struct dentry *, struct vfsmount *, int,
2001 const struct cred *);
2002 extern int filp_close( struct file *, fl_owner_t id);
2003 extern char * getname( const char __user *);
2004
2005 /* fs/ioctl.c */
2006
2007 extern int ioctl_preallocate( struct file *filp, void __user *argp);
2008
2009 /* fs/dcache.c */
2010 extern void __init vfs_caches_init_early( void);
2011 extern void __init vfs_caches_init( unsigned long);
2012
2013 extern struct kmem_cache *names_cachep;
2014
2015 #define __getname_gfp(gfp) kmem_cache_alloc(names_cachep, (gfp))
2016 #define __getname() __getname_gfp(GFP_KERNEL)
2017 #define __putname(name) kmem_cache_free(names_cachep, ( void *)(name))
2018 #ifndef CONFIG_AUDITSYSCALL
2019 #define putname(name) __putname(name)
2020 #else
2021 extern void putname( const char *name);
2022 #endif
2023
2024 #ifdef CONFIG_BLOCK
2025 extern int register_blkdev( unsigned int, const char *);
2026 extern void unregister_blkdev( unsigned int, const char *);
2027 extern struct block_device *bdget(dev_t);
2028 extern struct block_device *bdgrab( struct block_device *bdev);
2029 extern void bd_set_size( struct block_device *, loff_t size);
2030 extern void bd_forget( struct inode *inode);
2031 extern void bdput( struct block_device *);
2032 extern void invalidate_bdev( struct block_device *);
2033 extern int sync_blockdev( struct block_device *bdev);
2034 extern struct super_block *freeze_bdev( struct block_device *);
2035 extern void emergency_thaw_all( void);
2036 extern int thaw_bdev( struct block_device *bdev, struct super_block *sb);
2037 extern int fsync_bdev( struct block_device *);
2038 #else
2039 static inline void bd_forget( struct inode *inode) {}
2040 static inline int sync_blockdev( struct block_device *bdev) { return 0; }
2041 static inline void invalidate_bdev( struct block_device *bdev) {}
2042
2043 static inline struct super_block *freeze_bdev( struct block_device *sb)
2044 {
2045 return NULL;
2046 }
2047
2048 static inline int thaw_bdev( struct block_device *bdev, struct super_block *sb)
2049 {
2050 return 0;
2051 }
2052 #endif
2053 extern int sync_filesystem( struct super_block *);
2054 extern const struct file_operations def_blk_fops;
2055 extern const struct file_operations def_chr_fops;
2056 extern const struct file_operations bad_sock_fops;
2057 extern const struct file_operations def_fifo_fops;
2058 #ifdef CONFIG_BLOCK
2059 extern int ioctl_by_bdev( struct block_device *, unsigned, unsigned long);
2060 extern int blkdev_ioctl( struct block_device *, fmode_t, unsigned, unsigned long);
2061 extern long compat_blkdev_ioctl( struct file *, unsigned, unsigned long);
2062 extern int blkdev_get( struct block_device *bdev, fmode_t mode, void *holder);
2063 extern struct block_device *blkdev_get_by_path( const char *path, fmode_t mode,
2064 void *holder);
2065 extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
2066 void *holder);
2067 extern int blkdev_put( struct block_device *bdev, fmode_t mode);
2068 #ifdef CONFIG_SYSFS
2069 extern int bd_link_disk_holder( struct block_device *bdev, struct gendisk *disk);
2070 extern void bd_unlink_disk_holder( struct block_device *bdev,
2071 struct gendisk *disk);
2072 #else
2073 static inline int bd_link_disk_holder( struct block_device *bdev,
2074 struct gendisk *disk)
2075 {
2076 return 0;
2077 }
2078 static inline void bd_unlink_disk_holder( struct block_device *bdev,
2079 struct gendisk *disk)
2080 {
2081 }
2082 #endif
2083 #endif
2084
2085 /* fs/char_dev.c */
2086 #define CHRDEV_MAJOR_HASH_SIZE 255
2087 extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
2088 extern int register_chrdev_region(dev_t, unsigned, const char *);
2089 extern int __register_chrdev( unsigned int major, unsigned int baseminor,
2090 unsigned int count, const char *name,
2091 const struct file_operations *fops);
2092 extern void __unregister_chrdev( unsigned int major, unsigned int baseminor,
2093 unsigned int count, const char *name);
2094 extern void unregister_chrdev_region(dev_t, unsigned);
2095 extern void chrdev_show( struct seq_file *,off_t);
2096
2097 static inline int register_chrdev( unsigned int major, const char *name,
2098 const struct file_operations *fops)
2099 {
2100 return __register_chrdev(major, 0, 256, name, fops);
2101 }
2102
2103 static inline void unregister_chrdev( unsigned int major, const char *name)
2104 {
2105 __unregister_chrdev(major, 0, 256, name);
2106 }
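/*
 * Example (sketch): allocating a dynamic major with register_chrdev().
 * Passing major == 0 asks the kernel to pick a free major number and
 * return it. exampledev_fops is a hypothetical file_operations table.
 */
#if 0
static int exampledev_major;

static int __init exampledev_init(void)
{
	exampledev_major = register_chrdev(0, "exampledev",
					   &exampledev_fops);
	if (exampledev_major < 0)
		return exampledev_major;
	return 0;
}

static void __exit exampledev_exit(void)
{
	unregister_chrdev(exampledev_major, "exampledev");
}
#endif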
2107
2108 /* fs/block_dev.c */
2109 #define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
2110 #define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
2111
2112 #ifdef CONFIG_BLOCK
2113 #define BLKDEV_MAJOR_HASH_SIZE 255
2114 extern const char *__bdevname(dev_t, char *buffer);
2115 extern const char *bdevname( struct block_device *bdev, char *buffer);
2116 extern struct block_device *lookup_bdev( const char *);
2117 extern void blkdev_show( struct seq_file *,off_t);
2118
2119 #else
2120 #define BLKDEV_MAJOR_HASH_SIZE 0
2121 #endif
2122
2123 extern void init_special_inode( struct inode *, umode_t, dev_t);
2124
2125 /* Invalid inode operations -- fs/bad_inode.c */
2126 extern void make_bad_inode( struct inode *);
2127 extern int is_bad_inode( struct inode *);
2128
2129 extern const struct file_operations read_pipefifo_fops;
2130 extern const struct file_operations write_pipefifo_fops;
2131 extern const struct file_operations rdwr_pipefifo_fops;
2132
2133 extern int fs_may_remount_ro( struct super_block *);
2134
2135 #ifdef CONFIG_BLOCK
2136 /*
2137 * return READ, READA, or WRITE
2138 */
2139 #define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | RWA_MASK))
2140
2141 /*
2142 * return data direction, READ or WRITE
2143 */
2144 #define bio_data_dir(bio) ((bio)->bi_rw & 1)
2145
2146 extern void check_disk_size_change( struct gendisk *disk,
2147 struct block_device *bdev);
2148 extern int revalidate_disk( struct gendisk *);
2149 extern int check_disk_change( struct block_device *);
2150 extern int __invalidate_device( struct block_device *, bool);
2151 extern int invalidate_partition( struct gendisk *, int);
2152 #endif
2153 unsigned long invalidate_mapping_pages( struct address_space *mapping,
2154 pgoff_t start, pgoff_t end);
2155
2156 static inline void invalidate_remote_inode( struct inode *inode)
2157 {
2158 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2159 S_ISLNK(inode->i_mode))
2160 invalidate_mapping_pages(inode->i_mapping, 0, -1);
2161 }
2162 extern int invalidate_inode_pages2( struct address_space *mapping);
2163 extern int invalidate_inode_pages2_range( struct address_space *mapping,
2164 pgoff_t start, pgoff_t end);
2165 extern int write_inode_now( struct inode *, int);
2166 extern int filemap_fdatawrite( struct address_space *);
2167 extern int filemap_flush( struct address_space *);
2168 extern int filemap_fdatawait( struct address_space *);
2169 extern int filemap_fdatawait_range( struct address_space *, loff_t lstart,
2170 loff_t lend);
2171 extern int filemap_write_and_wait( struct address_space *mapping);
2172 extern int filemap_write_and_wait_range( struct address_space *mapping,
2173 loff_t lstart, loff_t lend);
2174 extern int __filemap_fdatawrite_range( struct address_space *mapping,
2175 loff_t start, loff_t end, int sync_mode);
2176 extern int filemap_fdatawrite_range( struct address_space *mapping,
2177 loff_t start, loff_t end);
2178
2179 extern int vfs_fsync_range( struct file *file, loff_t start, loff_t end,
2180 int datasync);
2181 extern int vfs_fsync( struct file *file, int datasync);
2182 extern int generic_write_sync( struct file *file, loff_t pos, loff_t count);
2183 extern void sync_supers( void);
2184 extern void emergency_sync( void);
2185 extern void emergency_remount( void);
2186 #ifdef CONFIG_BLOCK
2187 extern sector_t bmap( struct inode *, sector_t);
2188 #endif
2189 extern int notify_change( struct dentry *, struct iattr *);
2190 extern int inode_permission( struct inode *, int);
2191 extern int generic_permission( struct inode *, int, unsigned int,
2192 int (*check_acl)( struct inode *, int, unsigned int));
2193
2194 static inline bool execute_ok( struct inode *inode)
2195 {
2196 return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
2197 }
2198
2199 extern int get_write_access( struct inode *);
2200 extern int deny_write_access( struct file *);
2201 static inline void put_write_access( struct inode * inode)
2202 {
2203 atomic_dec(&inode->i_writecount);
2204 }
2205 static inline void allow_write_access( struct file *file)
2206 {
2207 if (file)
2208 atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
2209 }
2210 #ifdef CONFIG_IMA
2211 static inline void i_readcount_dec( struct inode *inode)
2212 {
2213 BUG_ON(!atomic_read(&inode->i_readcount));
2214 atomic_dec(&inode->i_readcount);
2215 }
2216 static inline void i_readcount_inc( struct inode *inode)
2217 {
2218 atomic_inc(&inode->i_readcount);
2219 }
2220 #else
2221 static inline void i_readcount_dec( struct inode *inode)
2222 {
2223 return;
2224 }
2225 static inline void i_readcount_inc( struct inode *inode)
2226 {
2227 return;
2228 }
2229 #endif
2230 extern int do_pipe_flags( int *, int);
2231 extern struct file *create_read_pipe( struct file *f, int flags);
2232 extern struct file *create_write_pipe( int flags);
2233 extern void free_write_pipe( struct file *);
2234
2235 extern int kernel_read( struct file *, loff_t, char *, unsigned long);
2236 extern struct file * open_exec( const char *);
2237
2238 /* fs/dcache.c -- generic fs support functions */
2239 extern int is_subdir( struct dentry *, struct dentry *);
2240 extern int path_is_under( struct path *, struct path *);
2241 extern ino_t find_inode_number( struct dentry *, struct qstr *);
2242
2243 #include <linux/err.h>
2244
2245 /* needed for stackable file system support */
2246 extern loff_t default_llseek( struct file *file, loff_t offset, int origin);
2247
2248 extern loff_t vfs_llseek( struct file *file, loff_t offset, int origin);
2249
2250 extern int inode_init_always( struct super_block *, struct inode *);
2251 extern void inode_init_once( struct inode *);
2252 extern void address_space_init_once( struct address_space *mapping);
2253 extern void ihold( struct inode * inode);
2254 extern void iput( struct inode *);
2255 extern struct inode * igrab( struct inode *);
2256 extern ino_t iunique( struct super_block *, ino_t);
2257 extern int inode_needs_sync( struct inode *inode);
2258 extern int generic_delete_inode( struct inode *inode);
2259 extern int generic_drop_inode( struct inode *inode);
2260
2261 extern struct inode *ilookup5_nowait( struct super_block *sb,
2262 unsigned long hashval, int (*test)( struct inode *, void *),
2263 void *data);
2264 extern struct inode *ilookup5( struct super_block *sb, unsigned long hashval,
2265 int (*test)( struct inode *, void *), void *data);
2266 extern struct inode *ilookup( struct super_block *sb, unsigned long ino);
2267
2268 extern struct inode * iget5_locked( struct super_block *, unsigned long, int (*test)( struct inode *, void *), int (*set)( struct inode *, void *), void *);
2269 extern struct inode * iget_locked( struct super_block *, unsigned long);
2270 extern int insert_inode_locked4( struct inode *, unsigned long, int (*test)( struct inode *, void *), void *);
2271 extern int insert_inode_locked( struct inode *);
2272 extern void unlock_new_inode( struct inode *);
2273 extern unsigned int get_next_ino( void);
2274
2275 extern void __iget( struct inode * inode);
2276 extern void iget_failed( struct inode *);
2277 extern void end_writeback( struct inode *);
2278 extern void __destroy_inode( struct inode *);
2279 extern struct inode *new_inode( struct super_block *);
2280 extern void free_inode_nonrcu( struct inode *inode);
2281 extern int should_remove_suid( struct dentry *);
2282 extern int file_remove_suid( struct file *);
2283
2284 extern void __insert_inode_hash( struct inode *, unsigned long hashval);
2285 extern void remove_inode_hash( struct inode *);
2286 static inline void insert_inode_hash( struct inode *inode)
2287 {
2288 __insert_inode_hash(inode, inode->i_ino);
2289 }
2290 extern void inode_sb_list_add( struct inode *inode);
2291
2292 #ifdef CONFIG_BLOCK
2293 extern void submit_bio( int, struct bio *);
2294 extern int bdev_read_only( struct block_device *);
2295 #endif
2296 extern int set_blocksize( struct block_device *, int);
2297 extern int sb_set_blocksize( struct super_block *, int);
2298 extern int sb_min_blocksize( struct super_block *, int);
2299
2300 extern int generic_file_mmap( struct file *, struct vm_area_struct *);
2301 extern int generic_file_readonly_mmap( struct file *, struct vm_area_struct *);
2302 extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
2303 int generic_write_checks( struct file *file, loff_t *pos, size_t *count, int isblk);
2304 extern ssize_t generic_file_aio_read( struct kiocb *, const struct iovec *, unsigned long, loff_t);
2305 extern ssize_t __generic_file_aio_write( struct kiocb *, const struct iovec *, unsigned long,
2306 loff_t *);
2307 extern ssize_t generic_file_aio_write( struct kiocb *, const struct iovec *, unsigned long, loff_t);
2308 extern ssize_t generic_file_direct_write( struct kiocb *, const struct iovec *,
2309 unsigned long *, loff_t, loff_t *, size_t, size_t);
2310 extern ssize_t generic_file_buffered_write( struct kiocb *, const struct iovec *,
2311 unsigned long, loff_t, loff_t *, size_t, ssize_t);
2312 extern ssize_t do_sync_read( struct file *filp, char __user *buf, size_t len, loff_t *ppos);
2313 extern ssize_t do_sync_write( struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
2314 extern int generic_segment_checks( const struct iovec *iov,
2315 unsigned long *nr_segs, size_t *count, int access_flags);
2316
2317 /* fs/block_dev.c */
2318 extern ssize_t blkdev_aio_write( struct kiocb *iocb, const struct iovec *iov,
2319 unsigned long nr_segs, loff_t pos);
2320 extern int blkdev_fsync( struct file *filp, int datasync);
2321
2322 /* fs/splice.c */
2323 extern ssize_t generic_file_splice_read( struct file *, loff_t *,
2324 struct pipe_inode_info *, size_t, unsigned int);
2325 extern ssize_t default_file_splice_read( struct file *, loff_t *,
2326 struct pipe_inode_info *, size_t, unsigned int);
2327 extern ssize_t generic_file_splice_write( struct pipe_inode_info *,
2328 struct file *, loff_t *, size_t, unsigned int);
2329 extern ssize_t generic_splice_sendpage( struct pipe_inode_info *pipe,
2330 struct file *out, loff_t *, size_t len, unsigned int flags);
2331 extern long do_splice_direct( struct file *in, loff_t *ppos, struct file *out,
2332 size_t len, unsigned int flags);
2333
2334 extern void
2335 file_ra_state_init( struct file_ra_state *ra, struct address_space *mapping);
2336 extern loff_t noop_llseek( struct file *file, loff_t offset, int origin);
2337 extern loff_t no_llseek( struct file *file, loff_t offset, int origin);
2338 extern loff_t generic_file_llseek( struct file *file, loff_t offset, int origin);
2339 extern loff_t generic_file_llseek_unlocked( struct file *file, loff_t offset,
2340 int origin);
2341 extern int generic_file_open( struct inode * inode, struct file * filp);
2342 extern int nonseekable_open( struct inode * inode, struct file * filp);
2343
2344 #ifdef CONFIG_FS_XIP
2345 extern ssize_t xip_file_read( struct file *filp, char __user *buf, size_t len,
2346 loff_t *ppos);
2347 extern int xip_file_mmap( struct file * file, struct vm_area_struct * vma);
2348 extern ssize_t xip_file_write( struct file *filp, const char __user *buf,
2349 size_t len, loff_t *ppos);
2350 extern int xip_truncate_page( struct address_space *mapping, loff_t from);
2351 #else
2352 static inline int xip_truncate_page( struct address_space *mapping, loff_t from)
2353 {
2354 return 0;
2355 }
2356 #endif
2357
2358 #ifdef CONFIG_BLOCK
2359 typedef void (dio_submit_t)( int rw, struct bio *bio, struct inode *inode,
2360 loff_t file_offset);
2361
2362 enum {
2363 /* need locking between buffered and direct access */
2364 DIO_LOCKING = 0x01,
2365
2366 /* filesystem does not support filling holes */
2367 DIO_SKIP_HOLES = 0x02,
2368 };
2369
2370 void dio_end_io( struct bio *bio, int error);
2371
2372 ssize_t __blockdev_direct_IO( int rw, struct kiocb *iocb, struct inode *inode,
2373 struct block_device *bdev, const struct iovec *iov, loff_t offset,
2374 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
2375 dio_submit_t submit_io, int flags);
2376
2377 static inline ssize_t blockdev_direct_IO( int rw, struct kiocb *iocb,
2378 struct inode *inode, struct block_device *bdev, const struct iovec *iov,
2379 loff_t offset, unsigned long nr_segs, get_block_t get_block,
2380 dio_iodone_t end_io)
2381 {
2382 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
2383 nr_segs, get_block, end_io, NULL,
2384 DIO_LOCKING | DIO_SKIP_HOLES);
2385 }
2386 #endif
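/*
 * Example (sketch, assumes CONFIG_BLOCK): a filesystem's
 * address_space_operations.direct_IO method delegating to
 * blockdev_direct_IO(). examplefs_get_block is a hypothetical
 * get_block_t callback.
 */
#if 0
static ssize_t examplefs_direct_IO(int rw, struct kiocb *iocb,
				   const struct iovec *iov, loff_t offset,
				   unsigned long nr_segs)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
				  iov, offset, nr_segs,
				  examplefs_get_block, NULL);
}
#endif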
2387
2388 extern const struct file_operations generic_ro_fops;
2389
2390 #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
2391
2392 extern int vfs_readlink( struct dentry *, char __user *, int, const char *);
2393 extern int vfs_follow_link( struct nameidata *, const char *);
2394 extern int page_readlink( struct dentry *, char __user *, int);
2395 extern void *page_follow_link_light( struct dentry *, struct nameidata *);
2396 extern void page_put_link( struct dentry *, struct nameidata *, void *);
2397 extern int __page_symlink( struct inode *inode, const char *symname, int len,
2398 int nofs);
2399 extern int page_symlink( struct inode *inode, const char *symname, int len);
2400 extern const struct inode_operations page_symlink_inode_operations;
2401 extern int generic_readlink( struct dentry *, char __user *, int);
2402 extern void generic_fillattr( struct inode *, struct kstat *);
2403 extern int vfs_getattr( struct vfsmount *, struct dentry *, struct kstat *);
2404 void __inode_add_bytes( struct inode *inode, loff_t bytes);
2405 void inode_add_bytes( struct inode *inode, loff_t bytes);
2406 void inode_sub_bytes( struct inode *inode, loff_t bytes);
2407 loff_t inode_get_bytes( struct inode *inode);
2408 void inode_set_bytes( struct inode *inode, loff_t bytes);
2409
2410 extern int vfs_readdir( struct file *, filldir_t, void *);
2411
2412 extern int vfs_stat( const char __user *, struct kstat *);
2413 extern int vfs_lstat( const char __user *, struct kstat *);
2414 extern int vfs_fstat( unsigned int, struct kstat *);
2415 extern int vfs_fstatat( int, const char __user *, struct kstat *, int);
2416
2417 extern int do_vfs_ioctl( struct file *filp, unsigned int fd, unsigned int cmd,
2418 unsigned long arg);
2419 extern int __generic_block_fiemap( struct inode *inode,
2420 struct fiemap_extent_info *fieinfo,
2421 loff_t start, loff_t len,
2422 get_block_t *get_block);
2423 extern int generic_block_fiemap( struct inode *inode,
2424 struct fiemap_extent_info *fieinfo, u64 start,
2425 u64 len, get_block_t *get_block);
2426
2427 extern void get_filesystem( struct file_system_type *fs);
2428 extern void put_filesystem( struct file_system_type *fs);
2429 extern struct file_system_type *get_fs_type( const char *name);
2430 extern struct super_block *get_super( struct block_device *);
2431 extern struct super_block *get_active_super( struct block_device *bdev);
2432 extern struct super_block *user_get_super(dev_t);
2433 extern void drop_super( struct super_block *sb);
2434 extern void iterate_supers( void (*)( struct super_block *, void *), void *);
2435
2436 extern int dcache_dir_open( struct inode *, struct file *);
2437 extern int dcache_dir_close( struct inode *, struct file *);
2438 extern loff_t dcache_dir_lseek( struct file *, loff_t, int);
2439 extern int dcache_readdir( struct file *, void *, filldir_t);
2440 extern int simple_setattr( struct dentry *, struct iattr *);
2441 extern int simple_getattr( struct vfsmount *, struct dentry *, struct kstat *);
2442 extern int simple_statfs( struct dentry *, struct kstatfs *);
2443 extern int simple_link( struct dentry *, struct inode *, struct dentry *);
2444 extern int simple_unlink( struct inode *, struct dentry *);
2445 extern int simple_rmdir( struct inode *, struct dentry *);
2446 extern int simple_rename( struct inode *, struct dentry *, struct inode *, struct dentry *);
2447 extern int noop_fsync( struct file *, int);
2448 extern int simple_empty( struct dentry *);
2449 extern int simple_readpage( struct file *file, struct page *page);
2450 extern int simple_write_begin( struct file *file, struct address_space *mapping,
2451 loff_t pos, unsigned len, unsigned flags,
2452 struct page **pagep, void **fsdata);
2453 extern int simple_write_end( struct file *file, struct address_space *mapping,
2454 loff_t pos, unsigned len, unsigned copied,
2455 struct page *page, void *fsdata);
2456
2457 extern struct dentry *simple_lookup( struct inode *, struct dentry *, struct nameidata *);
2458 extern ssize_t generic_read_dir( struct file *, char __user *, size_t, loff_t *);
2459 extern const struct file_operations simple_dir_operations;
2460 extern const struct inode_operations simple_dir_inode_operations;
2461 struct tree_descr { char *name; const struct file_operations *ops; int mode; };
2462 struct dentry *d_alloc_name( struct dentry *, const char *);
2463 extern int simple_fill_super( struct super_block *, unsigned long, struct tree_descr *);
2464 extern int simple_pin_fs( struct file_system_type *, struct vfsmount **mount, int *count);
2465 extern void simple_release_fs( struct vfsmount **mount, int *count);
2466
2467 extern ssize_t simple_read_from_buffer( void __user *to, size_t count,
2468 loff_t *ppos, const void *from, size_t available);
2469 extern ssize_t simple_write_to_buffer( void *to, size_t available, loff_t *ppos,
2470 const void __user *from, size_t count);
2471
2472 extern int generic_file_fsync( struct file *, int);
2473
2474 extern int generic_check_addressable( unsigned, u64);
2475
2476 #ifdef CONFIG_MIGRATION
2477 extern int buffer_migrate_page( struct address_space *,
2478 struct page *, struct page *);
2479 #else
2480 #define buffer_migrate_page NULL
2481 #endif
2482
2483 extern int inode_change_ok( const struct inode *, struct iattr *);
2484 extern int inode_newsize_ok( const struct inode *, loff_t offset);
2485 extern void setattr_copy( struct inode *inode, const struct iattr *attr);
2486
2487 extern void file_update_time( struct file *file);
2488
2489 extern int generic_show_options( struct seq_file *m, struct vfsmount *mnt);
2490 extern void save_mount_options( struct super_block *sb, char *options);
2491 extern void replace_mount_options( struct super_block *sb, char *options);
2492
2493 static inline ino_t parent_ino( struct dentry *dentry)
2494 {
2495 ino_t res;
2496
2497 /*
2498 * Don't strictly need d_lock here? If the parent ino could change
2499 * then surely we'd have a deeper race in the caller?
2500 */
2501 spin_lock(&dentry->d_lock);
2502 res = dentry->d_parent->d_inode->i_ino;
2503 spin_unlock(&dentry->d_lock);
2504 return res;
2505 }
2506
2507 /* Transaction based IO helpers */
2508
2509 /*
2510 * An argresp is stored in an allocated page and holds the
2511 * size of the argument or response, along with its content
2512 */
2513 struct simple_transaction_argresp {
2514 ssize_t size;
2515 char data[ 0];
2516 };
2517
2518 #define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof( struct simple_transaction_argresp))
2519
2520 char *simple_transaction_get( struct file *file, const char __user *buf,
2521 size_t size);
2522 ssize_t simple_transaction_read( struct file *file, char __user *buf,
2523 size_t size, loff_t *pos);
2524 int simple_transaction_release( struct inode *inode, struct file *file);
2525
2526 void simple_transaction_set( struct file *file, size_t n);
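/*
 * Example (sketch): a write method built on the transaction helpers,
 * in the style of their in-tree users. The parsing step is elided.
 */
#if 0
static ssize_t example_transaction_write(struct file *file,
					 const char __user *buf,
					 size_t size, loff_t *pos)
{
	char *data = simple_transaction_get(file, buf, size);

	if (IS_ERR(data))
		return PTR_ERR(data);
	/* ... parse the request in data[] and overwrite it with the
	 * reply, then record the reply length for the read side ... */
	simple_transaction_set(file, strlen(data));
	return size;
}
#endif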
2527
2528 /*
2529 * simple attribute files
2530 *
2531 * These attributes behave similarly to those in sysfs:
2532 *
2533 * Writing to an attribute immediately sets a value; an open file can be
2534 * written to multiple times.
2535 *
2536 * Reading from an attribute creates a buffer from the value that might get
2537 * read with multiple read calls. When the attribute has been read
2538 * completely, no further read calls are possible until the file is opened
2539 * again.
2540 *
2541 * All attributes contain a text representation of a numeric value
2542 * that is accessed with the get() and set() functions.
2543 */
2544 #define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
2545 static int __fops ## _open( struct inode *inode, struct file *file) \
2546 { \
2547 __simple_attr_check_format(__fmt, 0ull); \
2548 return simple_attr_open(inode, file, __get, __set, __fmt); \
2549 } \
2550 static const struct file_operations __fops = { \
2551 .owner = THIS_MODULE, \
2552 .open = __fops ## _open, \
2553 .release = simple_attr_release, \
2554 .read = simple_attr_read, \
2555 .write = simple_attr_write, \
2556 .llseek = generic_file_llseek, \
2557 };
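/*
 * Example (sketch): exposing a u64 through DEFINE_SIMPLE_ATTRIBUTE(),
 * the usual debugfs pattern. All example_* names are hypothetical.
 */
#if 0
static u64 example_counter;

static int example_get(void *data, u64 *val)
{
	*val = example_counter;
	return 0;
}

static int example_set(void *data, u64 val)
{
	example_counter = val;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(example_attr_fops, example_get, example_set,
			"%llu\n");

/* wired up with, e.g.:
 *	debugfs_create_file("counter", 0644, parent, NULL,
 *			    &example_attr_fops);
 */
#endif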
2558
2559 static inline void __attribute__((format(printf, 1, 2)))
2560 __simple_attr_check_format( const char *fmt, ...)
2561 {
2562 /* don't do anything, just let the compiler check the arguments; */
2563 }
2564
2565 int simple_attr_open( struct inode *inode, struct file *file,
2566 int (*get)( void *, u64 *), int (*set)( void *, u64),
2567 const char *fmt);
2568 int simple_attr_release( struct inode *inode, struct file *file);
2569 ssize_t simple_attr_read( struct file *file, char __user *buf,
2570 size_t len, loff_t *ppos);
2571 ssize_t simple_attr_write( struct file *file, const char __user *buf,
2572 size_t len, loff_t *ppos);
2573
2574 struct ctl_table;
2575 int proc_nr_files( struct ctl_table *table, int write,
2576 void __user *buffer, size_t *lenp, loff_t *ppos);
2577 int proc_nr_dentry( struct ctl_table *table, int write,
2578 void __user *buffer, size_t *lenp, loff_t *ppos);
2579 int proc_nr_inodes( struct ctl_table *table, int write,
2580 void __user *buffer, size_t *lenp, loff_t *ppos);
2581 int __init get_filesystem_list( char *buf);
2582
2583 #define __FMODE_EXEC ((__force int) FMODE_EXEC)
2584 #define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
2585
2586 #define ACC_MODE(x) ( "\004\002\006\006"[(x)&O_ACCMODE])
2587 #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
2588 (flag & __FMODE_NONOTIFY)))
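/*
 * The octal string in ACC_MODE() is a 4-entry lookup table indexed by
 * (x & O_ACCMODE): O_RDONLY (0) -> 04 (MAY_READ), O_WRONLY (1) -> 02
 * (MAY_WRITE), O_RDWR (2) -> 06 (both); the invalid value 3 also maps
 * to 06.
 */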
2589
2590 static inline int is_sxid(mode_t mode)
2591 {
2592 return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
2593 }
2594
2595 static inline void inode_has_no_xattr( struct inode *inode)
2596 {
2597 if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
2598 inode->i_flags |= S_NOSEC;
2599 }
2600
2601 #endif /* __KERNEL__ */
2602 #endif /* _LINUX_FS_H */
1 /*
2 * pci.h
3 *
4 * PCI defines and function prototypes
5 * Copyright 1994, Drew Eckhardt
6 * Copyright 1997--1999 Martin Mares <mj@ucw.cz>
7 *
8 * For more information, please consult the following manuals (look at
9 * http://www.pcisig.com/ for how to get them):
10 *
11 * PCI BIOS Specification
12 * PCI Local Bus Specification
13 * PCI to PCI Bridge Specification
14 * PCI System Design Guide
15 */
16
17 #ifndef LINUX_PCI_H
18 #define LINUX_PCI_H
19
20 #include <linux/pci_regs.h> /* The pci register defines */
21
22 /*
23 * The PCI interface treats multi-function devices as independent
24 * devices. The slot/function address of each device is encoded
25 * in a single byte as follows:
26 *
27 * 7:3 = slot
28 * 2:0 = function
29 */
30 #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
31 #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
32 #define PCI_FUNC(devfn) ((devfn) & 0x07)
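/*
 * Example: a device in slot 3, function 1 encodes as
 * PCI_DEVFN(3, 1) == (3 << 3) | 1 == 0x19, and decodes back with
 * PCI_SLOT(0x19) == 3 and PCI_FUNC(0x19) == 1.
 */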
33
34 /* Ioctls for /proc/bus/pci/X/Y nodes. */
35 #define PCIIOC_BASE ( 'P' << 24 | 'C' << 16 | 'I' << 8)
36 #define PCIIOC_CONTROLLER (PCIIOC_BASE | 0x00) /* Get controller for PCI device. */
37 #define PCIIOC_MMAP_IS_IO (PCIIOC_BASE | 0x01) /* Set mmap state to I/O space. */
38 #define PCIIOC_MMAP_IS_MEM (PCIIOC_BASE | 0x02) /* Set mmap state to MEM space. */
39 #define PCIIOC_WRITE_COMBINE (PCIIOC_BASE | 0x03) /* Enable/disable write-combining. */
40
41 #ifdef __KERNEL__
42
43 #include <linux/mod_devicetable.h>
44
45 #include <linux/types.h>
46 #include <linux/init.h>
47 #include <linux/ioport.h>
48 #include <linux/list.h>
49 #include <linux/compiler.h>
50 #include <linux/errno.h>
51 #include <linux/kobject.h>
52 #include <asm/atomic.h>
53 #include <linux/device.h>
54 #include <linux/io.h>
55 #include <linux/irqreturn.h>
56
57 /* Include the ID list */
58 #include <linux/pci_ids.h>
59
60 /* pci_slot represents a physical slot */
61 struct pci_slot {
62 struct pci_bus *bus; /* The bus this slot is on */
63 struct list_head list; /* node in list of slots on this bus */
64 struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */
65 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
66 struct kobject kobj;
67 };
68
69 static inline const char *pci_slot_name( const struct pci_slot *slot)
70 {
71 return kobject_name(&slot->kobj);
72 }
73
74 /* File state for mmap()s on /proc/bus/pci/X/Y */
75 enum pci_mmap_state {
76 pci_mmap_io,
77 pci_mmap_mem
78 };
79
80 /* This defines the direction arg to the DMA mapping routines. */
81 #define PCI_DMA_BIDIRECTIONAL 0
82 #define PCI_DMA_TODEVICE 1
83 #define PCI_DMA_FROMDEVICE 2
84 #define PCI_DMA_NONE 3
85
86 /*
87 * For PCI devices, the region numbers are assigned this way:
88 */
89 enum {
90 /* #0-5: standard PCI resources */
91 PCI_STD_RESOURCES,
92 PCI_STD_RESOURCE_END = 5,
93
94 /* #6: expansion ROM resource */
95 PCI_ROM_RESOURCE,
96
97 /* device specific resources */
98 #ifdef CONFIG_PCI_IOV
99 PCI_IOV_RESOURCES,
100 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
101 #endif
102
103 /* resources assigned to buses behind the bridge */
104 #define PCI_BRIDGE_RESOURCE_NUM 4
105
106 PCI_BRIDGE_RESOURCES,
107 PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
108 PCI_BRIDGE_RESOURCE_NUM - 1,
109
110 /* total resources associated with a PCI device */
111 PCI_NUM_RESOURCES,
112
113 /* preserve this for compatibility */
114 DEVICE_COUNT_RESOURCE
115 };
116
117 typedef int __bitwise pci_power_t;
118
119 #define PCI_D0 ((pci_power_t __force) 0)
120 #define PCI_D1 ((pci_power_t __force) 1)
121 #define PCI_D2 ((pci_power_t __force) 2)
122 #define PCI_D3hot ((pci_power_t __force) 3)
123 #define PCI_D3cold ((pci_power_t __force) 4)
124 #define PCI_UNKNOWN ((pci_power_t __force) 5)
125 #define PCI_POWER_ERROR ((pci_power_t __force) -1)
126
127 /* Remember to update this when the list above changes! */
128 extern const char *pci_power_names[];
129
130 static inline const char *pci_power_name(pci_power_t state)
131 {
132 return pci_power_names[ 1 + ( int) state];
133 }
134
135 #define PCI_PM_D2_DELAY 200
136 #define PCI_PM_D3_WAIT 10
137 #define PCI_PM_BUS_WAIT 50
138
139 /** The pci_channel state describes connectivity between the CPU and
140 * the pci device. If some PCI bus between here and the pci device
141 * has crashed or locked up, this info is reflected here.
142 */
143 typedef unsigned int __bitwise pci_channel_state_t;
144
145 enum pci_channel_state {
146 /* I/O channel is in normal state */
147 pci_channel_io_normal = (__force pci_channel_state_t) 1,
148
149 /* I/O to channel is blocked */
150 pci_channel_io_frozen = (__force pci_channel_state_t) 2,
151
152 /* PCI card is dead */
153 pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
154 };
155
156 typedef unsigned int __bitwise pcie_reset_state_t;
157
158 enum pcie_reset_state {
159 /* Reset is NOT asserted (Use to deassert reset) */
160 pcie_deassert_reset = (__force pcie_reset_state_t) 1,
161
162 /* Use #PERST to reset PCI-E device */
163 pcie_warm_reset = (__force pcie_reset_state_t) 2,
164
165 /* Use PCI-E Hot Reset to reset device */
166 pcie_hot_reset = (__force pcie_reset_state_t) 3
167 };
168
169 typedef unsigned short __bitwise pci_dev_flags_t;
170 enum pci_dev_flags {
171 /* INTX_DISABLE in PCI_COMMAND register disables MSI
172 * generation too.
173 */
174 PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1,
175 /* Device configuration is irrevocably lost if disabled into D3 */
176 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
177 };
178
179 enum pci_irq_reroute_variant {
180 INTEL_IRQ_REROUTE_VARIANT = 1,
181 MAX_IRQ_REROUTE_VARIANTS = 3
182 };
183
184 typedef unsigned short __bitwise pci_bus_flags_t;
185 enum pci_bus_flags {
186 PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
187 PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
188 };
189
190 /* Based on the PCI Hotplug Spec, but some values are made up by us */
191 enum pci_bus_speed {
192 PCI_SPEED_33MHz = 0x00,
193 PCI_SPEED_66MHz = 0x01,
194 PCI_SPEED_66MHz_PCIX = 0x02,
195 PCI_SPEED_100MHz_PCIX = 0x03,
196 PCI_SPEED_133MHz_PCIX = 0x04,
197 PCI_SPEED_66MHz_PCIX_ECC = 0x05,
198 PCI_SPEED_100MHz_PCIX_ECC = 0x06,
199 PCI_SPEED_133MHz_PCIX_ECC = 0x07,
200 PCI_SPEED_66MHz_PCIX_266 = 0x09,
201 PCI_SPEED_100MHz_PCIX_266 = 0x0a,
202 PCI_SPEED_133MHz_PCIX_266 = 0x0b,
203 AGP_UNKNOWN = 0x0c,
204 AGP_1X = 0x0d,
205 AGP_2X = 0x0e,
206 AGP_4X = 0x0f,
207 AGP_8X = 0x10,
208 PCI_SPEED_66MHz_PCIX_533 = 0x11,
209 PCI_SPEED_100MHz_PCIX_533 = 0x12,
210 PCI_SPEED_133MHz_PCIX_533 = 0x13,
211 PCIE_SPEED_2_5GT = 0x14,
212 PCIE_SPEED_5_0GT = 0x15,
213 PCIE_SPEED_8_0GT = 0x16,
214 PCI_SPEED_UNKNOWN = 0xff,
215 };
216
217 struct pci_cap_saved_data {
218 char cap_nr;
219 unsigned int size;
220 u32 data[ 0];
221 };
222
223 struct pci_cap_saved_state {
224 struct hlist_node next;
225 struct pci_cap_saved_data cap;
226 };
227
228 struct pcie_link_state;
229 struct pci_vpd;
230 struct pci_sriov;
231 struct pci_ats;
232
233 /*
234 * The pci_dev structure is used to describe PCI devices.
235 */
236 struct pci_dev {
237 struct list_head bus_list; /* node in per-bus list */
238 struct pci_bus *bus; /* bus this device is on */
239 struct pci_bus *subordinate; /* bus this device bridges to */
240
241 void *sysdata; /* hook for sys-specific extension */
242 struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
243 struct pci_slot *slot; /* Physical slot this device is in */
244
245 unsigned int devfn; /* encoded device & function index */
246 unsigned short vendor;
247 unsigned short device;
248 unsigned short subsystem_vendor;
249 unsigned short subsystem_device;
250 unsigned int class; /* 3 bytes: (base,sub,prog-if) */
251 u8 revision; /* PCI revision, low byte of class word */
252 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
253 u8 pcie_cap; /* PCI-E capability offset */
254 u8 pcie_type; /* PCI-E device/port type */
255 u8 rom_base_reg; /* which config register controls the ROM */
256 u8 pin; /* which interrupt pin this device uses */
257
258 struct pci_driver *driver; /* which driver has allocated this device */
259 u64 dma_mask; /* Mask of the bits of bus address this
260 device implements. Normally this is
261 0xffffffff. You only need to change
262 this if your device has broken DMA
263 or supports 64-bit transfers. */
264
265 struct device_dma_parameters dma_parms;
266
267 pci_power_t current_state; /* Current operating state. In ACPI-speak,
268 this is D0-D3, D0 being fully functional,
269 and D3 being off. */
270 int pm_cap; /* PM capability offset in the
271 configuration space */
272 unsigned int pme_support: 5; /* Bitmask of states from which PME#
273 can be generated */
274 unsigned int pme_interrupt: 1;
275 unsigned int d1_support: 1; /* Low power state D1 is supported */
276 unsigned int d2_support: 1; /* Low power state D2 is supported */
277 unsigned int no_d1d2: 1; /* Only allow D0 and D3 */
278 unsigned int mmio_always_on: 1; /* disallow turning off io/mem
279 decoding during bar sizing */
280 unsigned int wakeup_prepared: 1;
281 unsigned int d3_delay; /* D3->D0 transition time in ms */
282
283 #ifdef CONFIG_PCIEASPM
284 struct pcie_link_state *link_state; /* ASPM link state. */
285 #endif
286
287 pci_channel_state_t error_state; /* current connectivity state */
288 struct device dev; /* Generic device interface */
289
290 int cfg_size; /* Size of configuration space */
291
292 /*
293 * Instead of touching interrupt line and base address registers
294 * directly, use the values stored here. They might be different!
295 */
296 unsigned int irq;
297 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
298 resource_size_t fw_addr[DEVICE_COUNT_RESOURCE]; /* FW-assigned addr */
299
300 /* These fields are used by common fixups */
301 unsigned int transparent: 1; /* Transparent PCI bridge */
302 unsigned int multifunction: 1; /* Part of multi-function device */
303 /* keep track of device state */
304 unsigned int is_added: 1;
305 unsigned int is_busmaster: 1; /* device is busmaster */
306 unsigned int no_msi: 1; /* device may not use msi */
307 unsigned int block_ucfg_access: 1; /* userspace config space access is blocked */
308 unsigned int broken_parity_status: 1; /* Device generates false positive parity */
309 unsigned int irq_reroute_variant: 2; /* device needs IRQ rerouting variant */
310 unsigned int msi_enabled: 1;
311 unsigned int msix_enabled: 1;
312 unsigned int ari_enabled: 1; /* ARI forwarding */
313 unsigned int is_managed: 1;
314 unsigned int is_pcie: 1; /* Obsolete. Will be removed.
315 Use pci_is_pcie() instead */
316 unsigned int needs_freset: 1; /* Dev requires fundamental reset */
317 unsigned int state_saved: 1;
318 unsigned int is_physfn: 1;
319 unsigned int is_virtfn: 1;
320 unsigned int reset_fn: 1;
321 unsigned int is_hotplug_bridge: 1;
322 unsigned int __aer_firmware_first_valid: 1;
323 unsigned int __aer_firmware_first: 1;
324 pci_dev_flags_t dev_flags;
325 atomic_t enable_cnt; /* pci_enable_device has been called */
326
327 u32 saved_config_space[ 16]; /* config space saved at suspend time */
328 struct hlist_head saved_cap_space;
329 struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
330 int rom_attr_enabled; /* has display of the rom attribute been enabled? */
331 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
332 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
333 #ifdef CONFIG_PCI_MSI
334 struct list_head msi_list;
335 #endif
336 struct pci_vpd *vpd;
337 #ifdef CONFIG_PCI_IOV
338 union {
339 struct pci_sriov *sriov; /* SR-IOV capability related */
340 struct pci_dev *physfn; /* the PF this VF is associated with */
341 };
342 struct pci_ats *ats; /* Address Translation Service */
343 #endif
344 };
345
346 static inline struct pci_dev *pci_physfn( struct pci_dev *dev)
347 {
348 #ifdef CONFIG_PCI_IOV
349 if (dev->is_virtfn)
350 dev = dev->physfn;
351 #endif
352
353 return dev;
354 }
355
356 extern struct pci_dev *alloc_pci_dev( void);
357
358 #define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list)
359 #define to_pci_dev(n) container_of(n, struct pci_dev, dev)
360 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
361
362 static inline int pci_channel_offline( struct pci_dev *pdev)
363 {
364 return (pdev->error_state != pci_channel_io_normal);
365 }
366
367 static inline struct pci_cap_saved_state *pci_find_saved_cap(
368 struct pci_dev *pci_dev, char cap)
369 {
370 struct pci_cap_saved_state *tmp;
371 struct hlist_node *pos;
372
373 hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
374 if (tmp->cap.cap_nr == cap)
375 return tmp;
376 }
377 return NULL;
378 }
379
380 static inline void pci_add_saved_cap( struct pci_dev *pci_dev,
381 struct pci_cap_saved_state *new_cap)
382 {
383 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
384 }
385
386 /*
387 * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
388 * to P2P or CardBus bridge windows) go in a table. Additional ones (for
389 * buses below host bridges or subtractive decode bridges) go in the list.
390 * Use pci_bus_for_each_resource() to iterate through all the resources.
391 */
392
393 /*
394 * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
395 * and there's no way to program the bridge with the details of the window.
396 * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
397 * decode bit set, because they are explicit and can be programmed with _SRS.
398 */
399 #define PCI_SUBTRACTIVE_DECODE 0x1
400
401 struct pci_bus_resource {
402 struct list_head list;
403 struct resource *res;
404 unsigned int flags;
405 };
406
407 #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
408
409 struct pci_bus {
410 struct list_head node; /* node in list of buses */
411 struct pci_bus *parent; /* parent bus this bridge is on */
412 struct list_head children; /* list of child buses */
413 struct list_head devices; /* list of devices on this bus */
414 struct pci_dev *self; /* bridge device as seen by parent */
415 struct list_head slots; /* list of slots on this bus */
416 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
417 struct list_head resources; /* address space routed to this bus */
418
419 struct pci_ops *ops; /* configuration access functions */
420 void *sysdata; /* hook for sys-specific extension */
421 struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
422
423 unsigned char number; /* bus number */
424 unsigned char primary; /* number of primary bridge */
425 unsigned char secondary; /* number of secondary bridge */
426 unsigned char subordinate; /* max number of subordinate buses */
427 unsigned char max_bus_speed; /* enum pci_bus_speed */
428 unsigned char cur_bus_speed; /* enum pci_bus_speed */
429
430 char name[ 48];
431
432 unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
433 pci_bus_flags_t bus_flags; /* Inherited by child buses */
434 struct device *bridge;
435 struct device dev;
436 struct bin_attribute *legacy_io; /* legacy I/O for this bus */
437 struct bin_attribute *legacy_mem; /* legacy mem */
438 unsigned int is_added: 1;
439 };
440
441 #define pci_bus_b(n) list_entry(n, struct pci_bus, node)
442 #define to_pci_bus(n) container_of(n, struct pci_bus, dev)
443
444 /*
445 * Returns true if the pci bus is root (behind host-pci bridge),
446 * false otherwise
447 */
448 static inline bool pci_is_root_bus( struct pci_bus *pbus)
449 {
450 return !(pbus->parent);
451 }
452
453 #ifdef CONFIG_PCI_MSI
454 static inline bool pci_dev_msi_enabled( struct pci_dev *pci_dev)
455 {
456 return pci_dev->msi_enabled || pci_dev->msix_enabled;
457 }
458 #else
459 static inline bool pci_dev_msi_enabled( struct pci_dev *pci_dev) { return false; }
460 #endif
461
462 /*
463 * Error values that may be returned by PCI functions.
464 */
465 #define PCIBIOS_SUCCESSFUL 0x00
466 #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
467 #define PCIBIOS_BAD_VENDOR_ID 0x83
468 #define PCIBIOS_DEVICE_NOT_FOUND 0x86
469 #define PCIBIOS_BAD_REGISTER_NUMBER 0x87
470 #define PCIBIOS_SET_FAILED 0x88
471 #define PCIBIOS_BUFFER_TOO_SMALL 0x89
472
473 /* Low-level architecture-dependent routines */
474
475 struct pci_ops {
476 int (*read)( struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
477 int (*write)( struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
478 };
479
480 /*
481 * ACPI needs to be able to access PCI config space before we've done a
482 * PCI bus scan and created pci_bus structures.
483 */
484 extern int raw_pci_read( unsigned int domain, unsigned int bus,
485 unsigned int devfn, int reg, int len, u32 *val);
486 extern int raw_pci_write( unsigned int domain, unsigned int bus,
487 unsigned int devfn, int reg, int len, u32 val);
488
489 struct pci_bus_region {
490 resource_size_t start;
491 resource_size_t end;
492 };
493
494 struct pci_dynids {
495 spinlock_t lock; /* protects list, index */
496 struct list_head list; /* for IDs added at runtime */
497 };
498
499 /* ---------------------------------------------------------------- */
500 /** PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
501 * a set of callbacks in struct pci_error_handlers, then that device driver
502 * will be notified of PCI bus errors, and will be driven to recovery
503 * when an error occurs.
504 */
505
506 typedef unsigned int __bitwise pci_ers_result_t;
507
508 enum pci_ers_result {
509 /* no result/none/not supported in device driver */
510 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
511
512 /* Device driver can recover without slot reset */
513 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
514
515 /* Device driver wants slot to be reset. */
516 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
517
518 /* Device has completely failed, is unrecoverable */
519 PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
520
521 /* Device driver is fully recovered and operational */
522 PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
523 };
524
525 /* PCI bus error event callbacks */
526 struct pci_error_handlers {
527 /* PCI bus error detected on this device */
528 pci_ers_result_t (*error_detected)( struct pci_dev *dev,
529 enum pci_channel_state error);
530
531 /* MMIO has been re-enabled, but not DMA */
532 pci_ers_result_t (*mmio_enabled)( struct pci_dev *dev);
533
534 /* PCI Express link has been reset */
535 pci_ers_result_t (*link_reset)( struct pci_dev *dev);
536
537 /* PCI slot has been reset */
538 pci_ers_result_t (*slot_reset)( struct pci_dev *dev);
539
540 /* Device driver may resume normal operations */
541 void (*resume)( struct pci_dev *dev);
542 };
543
544 /* ---------------------------------------------------------------- */
545
546 struct module;
547 struct pci_driver {
548 struct list_head node;
549 const char *name;
550 const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */
551 int (*probe) ( struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
552 void (*remove) ( struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
553 int (*suspend) ( struct pci_dev *dev, pm_message_t state); /* Device suspended */
554 int (*suspend_late) ( struct pci_dev *dev, pm_message_t state);
555 int (*resume_early) ( struct pci_dev *dev);
556 int (*resume) ( struct pci_dev *dev); /* Device woken up */
557 void (*shutdown) ( struct pci_dev *dev);
558 struct pci_error_handlers *err_handler;
559 struct device_driver driver;
560 struct pci_dynids dynids;
561 };
562
563 #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
564
565 /**
566 * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
567 * @_table: device table name
568 *
569 * This macro is used to create a struct pci_device_id array (a device table)
570 * in a generic manner.
571 */
572 #define DEFINE_PCI_DEVICE_TABLE(_table) \
573 const struct pci_device_id _table[] __devinitconst
574
575 /**
576 * PCI_DEVICE - macro used to describe a specific pci device
577 * @vend: the 16 bit PCI Vendor ID
578 * @dev: the 16 bit PCI Device ID
579 *
580 * This macro is used to create a struct pci_device_id that matches a
581 * specific device. The subvendor and subdevice fields will be set to
582 * PCI_ANY_ID.
583 */
584 #define PCI_DEVICE(vend,dev) \
585 .vendor = (vend), .device = (dev), \
586 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
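/*
 * Example (sketch): a driver's device table built with the macros
 * above. The vendor/device IDs are placeholders.
 */
#if 0
static DEFINE_PCI_DEVICE_TABLE(example_pci_ids) = {
	{ PCI_DEVICE(0x1234, 0x5678) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);
#endif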
587
588 /**
589 * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
590 * @dev_class: the class, subclass, prog-if triple for this device
591 * @dev_class_mask: the class mask for this device
592 *
593 * This macro is used to create a struct pci_device_id that matches a
594 * specific PCI class. The vendor, device, subvendor, and subdevice
595 * fields will be set to PCI_ANY_ID.
596 */
597 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
598 . class = (dev_class), .class_mask = (dev_class_mask), \
599 .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
600 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
601
602 /**
603 * PCI_VDEVICE - macro used to describe a specific pci device in short form
604 * @vendor: the vendor name
605 * @device: the 16 bit PCI Device ID
606 *
607 * This macro is used to create a struct pci_device_id that matches a
608 * specific PCI device. The subvendor and subdevice fields will be set
609 * to PCI_ANY_ID. The macro allows the next field to follow as the device
610 * private data.
611 */
612
613 #define PCI_VDEVICE(vendor, device) \
614 PCI_VENDOR_ID_##vendor, (device), \
615 PCI_ANY_ID, PCI_ANY_ID, 0, 0
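/*
 * Example (sketch): PCI_VDEVICE() leaves room for driver private data
 * in the next field. example_chip_info is hypothetical; the vendor
 * name expands to PCI_VENDOR_ID_INTEL from <linux/pci_ids.h>.
 */
#if 0
static const struct pci_device_id example_ids[] = {
	{ PCI_VDEVICE(INTEL, 0x1234), (kernel_ulong_t)&example_chip_info },
	{ 0, }
};
#endif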
616
617 /* these external functions are only available when PCI support is enabled */
618 #ifdef CONFIG_PCI
619
620 extern struct bus_type pci_bus_type;
621
622 /* Do NOT directly access these two variables, unless you are arch-specific
623 * PCI code or PCI core code. */
624 extern struct list_head pci_root_buses; /* list of all known PCI buses */
625 /* Some device drivers need to know if PCI is initialized */
626 extern int no_pci_devices( void);
627
628 void pcibios_fixup_bus( struct pci_bus *);
629 int __must_check pcibios_enable_device( struct pci_dev *, int mask);
630 char *pcibios_setup( char *str);
631
632 /* Used only when drivers/pci/setup.c is used */
633 resource_size_t pcibios_align_resource( void *, const struct resource *,
634 resource_size_t,
635 resource_size_t);
636 void pcibios_update_irq( struct pci_dev *, int irq);
637
638 /* Weak but can be overridden by arch */
639 void pci_fixup_cardbus( struct pci_bus *);
640
641 /* Generic PCI functions used internally */
642
643 void pcibios_scan_specific_bus( int busn);
644 extern struct pci_bus *pci_find_bus( int domain, int busnr);
645 void pci_bus_add_devices( const struct pci_bus *bus);
646 struct pci_bus *pci_scan_bus_parented( struct device *parent, int bus,
647 struct pci_ops *ops, void *sysdata);
648 static inline struct pci_bus * __devinit pci_scan_bus( int bus, struct pci_ops *ops,
649 void *sysdata)
650 {
651 struct pci_bus *root_bus;
652 root_bus = pci_scan_bus_parented(NULL, bus, ops, sysdata);
653 if (root_bus)
654 pci_bus_add_devices(root_bus);
655 return root_bus;
656 }
657 struct pci_bus *pci_create_bus( struct device *parent, int bus,
658 struct pci_ops *ops, void *sysdata);
659 struct pci_bus *pci_add_new_bus( struct pci_bus *parent, struct pci_dev *dev,
660 int busnr);
661 void pcie_update_link_speed( struct pci_bus *bus, u16 link_status);
662 struct pci_slot *pci_create_slot( struct pci_bus *parent, int slot_nr,
663 const char *name,
664 struct hotplug_slot *hotplug);
665 void pci_destroy_slot( struct pci_slot *slot);
666 void pci_renumber_slot( struct pci_slot *slot, int slot_nr);
667 int pci_scan_slot( struct pci_bus *bus, int devfn);
668 struct pci_dev *pci_scan_single_device( struct pci_bus *bus, int devfn);
669 void pci_device_add( struct pci_dev *dev, struct pci_bus *bus);
670 unsigned int pci_scan_child_bus( struct pci_bus *bus);
671 int __must_check pci_bus_add_device( struct pci_dev *dev);
672 void pci_read_bridge_bases( struct pci_bus *child);
673 struct resource *pci_find_parent_resource( const struct pci_dev *dev,
674 struct resource *res);
675 u8 pci_swizzle_interrupt_pin( struct pci_dev *dev, u8 pin);
676 int pci_get_interrupt_pin( struct pci_dev *dev, struct pci_dev **bridge);
677 u8 pci_common_swizzle( struct pci_dev *dev, u8 *pinp);
678 extern struct pci_dev *pci_dev_get( struct pci_dev *dev);
679 extern void pci_dev_put( struct pci_dev *dev);
680 extern void pci_remove_bus( struct pci_bus *b);
681 extern void pci_remove_bus_device( struct pci_dev *dev);
682 extern void pci_stop_bus_device( struct pci_dev *dev);
683 void pci_setup_cardbus( struct pci_bus *bus);
684 extern void pci_sort_breadthfirst( void);
685 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
686 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
687 #define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
688
689 /* Generic PCI functions exported to card drivers */
690
691 enum pci_lost_interrupt_reason {
692 PCI_LOST_IRQ_NO_INFORMATION = 0,
693 PCI_LOST_IRQ_DISABLE_MSI,
694 PCI_LOST_IRQ_DISABLE_MSIX,
695 PCI_LOST_IRQ_DISABLE_ACPI,
696 };
697 enum pci_lost_interrupt_reason pci_lost_interrupt( struct pci_dev *dev);
698 int pci_find_capability( struct pci_dev *dev, int cap);
699 int pci_find_next_capability( struct pci_dev *dev, u8 pos, int cap);
700 int pci_find_ext_capability( struct pci_dev *dev, int cap);
701 int pci_bus_find_ext_capability( struct pci_bus *bus, unsigned int devfn,
702 int cap);
703 int pci_find_ht_capability( struct pci_dev *dev, int ht_cap);
704 int pci_find_next_ht_capability( struct pci_dev *dev, int pos, int ht_cap);
705 struct pci_bus *pci_find_next_bus( const struct pci_bus *from);
706
707 struct pci_dev *pci_get_device( unsigned int vendor, unsigned int device,
708 struct pci_dev *from);
709 struct pci_dev *pci_get_subsys( unsigned int vendor, unsigned int device,
710 unsigned int ss_vendor, unsigned int ss_device,
711 struct pci_dev *from);
712 struct pci_dev *pci_get_slot( struct pci_bus *bus, unsigned int devfn);
713 struct pci_dev *pci_get_domain_bus_and_slot( int domain, unsigned int bus,
714 unsigned int devfn);
715 static inline struct pci_dev *pci_get_bus_and_slot( unsigned int bus,
716 unsigned int devfn)
717 {
718 return pci_get_domain_bus_and_slot( 0, bus, devfn);
719 }
720 struct pci_dev *pci_get_class( unsigned int class, struct pci_dev *from);
721 int pci_dev_present( const struct pci_device_id *ids);
722
723 int pci_bus_read_config_byte( struct pci_bus *bus, unsigned int devfn,
724 int where, u8 *val);
725 int pci_bus_read_config_word( struct pci_bus *bus, unsigned int devfn,
726 int where, u16 *val);
727 int pci_bus_read_config_dword( struct pci_bus *bus, unsigned int devfn,
728 int where, u32 *val);
729 int pci_bus_write_config_byte( struct pci_bus *bus, unsigned int devfn,
730 int where, u8 val);
731 int pci_bus_write_config_word( struct pci_bus *bus, unsigned int devfn,
732 int where, u16 val);
733 int pci_bus_write_config_dword( struct pci_bus *bus, unsigned int devfn,
734 int where, u32 val);
735 struct pci_ops *pci_bus_set_ops( struct pci_bus *bus, struct pci_ops *ops);
736
737 static inline int pci_read_config_byte( struct pci_dev *dev, int where, u8 *val)
738 {
739 return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
740 }
741 static inline int pci_read_config_word( struct pci_dev *dev, int where, u16 *val)
742 {
743 return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
744 }
745 static inline int pci_read_config_dword( struct pci_dev *dev, int where,
746 u32 *val)
747 {
748 return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
749 }
750 static inline int pci_write_config_byte( struct pci_dev *dev, int where, u8 val)
751 {
752 return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
753 }
754 static inline int pci_write_config_word( struct pci_dev *dev, int where, u16 val)
755 {
756 return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
757 }
758 static inline int pci_write_config_dword( struct pci_dev *dev, int where,
759 u32 val)
760 {
761 return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
762 }
763
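A minimal sketch of the accessor wrappers above, assuming only <linux/pci.h>; the function name is hypothetical, and note that these accessors return a PCIBIOS_* code rather than a negative errno.

#include <linux/pci.h>

/* Read the 16-bit vendor ID at standard config offset PCI_VENDOR_ID. */
static int example_read_vendor(struct pci_dev *pdev)
{
	u16 vendor;
	int err = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);

	if (err)
		return err;	/* PCIBIOS_* error code */
	dev_info(&pdev->dev, "vendor id: 0x%04x\n", vendor);
	return 0;
}
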
764 int __must_check pci_enable_device( struct pci_dev *dev);
765 int __must_check pci_enable_device_io( struct pci_dev *dev);
766 int __must_check pci_enable_device_mem( struct pci_dev *dev);
767 int __must_check pci_reenable_device( struct pci_dev *);
768 int __must_check pcim_enable_device( struct pci_dev *pdev);
769 void pcim_pin_device( struct pci_dev *pdev);
770
771 static inline int pci_is_enabled( struct pci_dev *pdev)
772 {
773 return (atomic_read(&pdev->enable_cnt) > 0);
774 }
775
776 static inline int pci_is_managed( struct pci_dev *pdev)
777 {
778 return pdev->is_managed;
779 }
780
781 void pci_disable_device( struct pci_dev *dev);
782 void pci_set_master( struct pci_dev *dev);
783 void pci_clear_master( struct pci_dev *dev);
784 int pci_set_pcie_reset_state( struct pci_dev *dev, enum pcie_reset_state state);
785 int pci_set_cacheline_size( struct pci_dev *dev);
786 #define HAVE_PCI_SET_MWI
787 int __must_check pci_set_mwi( struct pci_dev *dev);
788 int pci_try_set_mwi( struct pci_dev *dev);
789 void pci_clear_mwi( struct pci_dev *dev);
790 void pci_intx( struct pci_dev *dev, int enable);
791 void pci_msi_off( struct pci_dev *dev);
792 int pci_set_dma_max_seg_size( struct pci_dev *dev, unsigned int size);
793 int pci_set_dma_seg_boundary( struct pci_dev *dev, unsigned long mask);
794 int pcix_get_max_mmrbc( struct pci_dev *dev);
795 int pcix_get_mmrbc( struct pci_dev *dev);
796 int pcix_set_mmrbc( struct pci_dev *dev, int mmrbc);
797 int pcie_get_readrq( struct pci_dev *dev);
798 int pcie_set_readrq( struct pci_dev *dev, int rq);
799 int __pci_reset_function( struct pci_dev *dev);
800 int pci_reset_function( struct pci_dev *dev);
801 void pci_update_resource( struct pci_dev *dev, int resno);
802 int __must_check pci_assign_resource( struct pci_dev *dev, int i);
803 int pci_select_bars( struct pci_dev *dev, unsigned long flags);
804
805 /* ROM control related routines */
806 int pci_enable_rom( struct pci_dev *pdev);
807 void pci_disable_rom( struct pci_dev *pdev);
808 void __iomem __must_check *pci_map_rom( struct pci_dev *pdev, size_t *size);
809 void pci_unmap_rom( struct pci_dev *pdev, void __iomem *rom);
810 size_t pci_get_rom_size( struct pci_dev *pdev, void __iomem *rom, size_t size);
811
812 /* Power management related routines */
813 int pci_save_state( struct pci_dev *dev);
814 void pci_restore_state( struct pci_dev *dev);
815 struct pci_saved_state *pci_store_saved_state( struct pci_dev *dev);
816 int pci_load_saved_state( struct pci_dev *dev, struct pci_saved_state *state);
817 int pci_load_and_free_saved_state( struct pci_dev *dev,
818 struct pci_saved_state **state);
819 int __pci_complete_power_transition( struct pci_dev *dev, pci_power_t state);
820 int pci_set_power_state( struct pci_dev *dev, pci_power_t state);
821 pci_power_t pci_choose_state( struct pci_dev *dev, pm_message_t state);
822 bool pci_pme_capable( struct pci_dev *dev, pci_power_t state);
823 void pci_pme_active( struct pci_dev *dev, bool enable);
824 int __pci_enable_wake( struct pci_dev *dev, pci_power_t state,
825 bool runtime, bool enable);
826 int pci_wake_from_d3( struct pci_dev *dev, bool enable);
827 pci_power_t pci_target_state( struct pci_dev *dev);
828 int pci_prepare_to_sleep( struct pci_dev *dev);
829 int pci_back_from_sleep( struct pci_dev *dev);
830 bool pci_dev_run_wake( struct pci_dev *dev);
831 bool pci_check_pme_status( struct pci_dev *dev);
832 void pci_pme_wakeup_bus( struct pci_bus *bus);
833
834 static inline int pci_enable_wake( struct pci_dev *dev, pci_power_t state,
835 bool enable)
836 {
837 return __pci_enable_wake(dev, state, false, enable);
838 }
839
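A sketch of how a legacy .suspend hook might combine the power-management helpers above; the name and the decision not to arm wake-up are illustrative only.

#include <linux/pci.h>

static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_power_t target = pci_choose_state(pdev, state);

	pci_save_state(pdev);			/* snapshot config space */
	pci_enable_wake(pdev, target, false);	/* do not arm wake-up */
	return pci_set_power_state(pdev, target);
}
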
840 #define PCI_EXP_IDO_REQUEST (1 << 0)
841 #define PCI_EXP_IDO_COMPLETION (1 << 1)
842 void pci_enable_ido( struct pci_dev *dev, unsigned long type);
843 void pci_disable_ido( struct pci_dev *dev, unsigned long type);
844
845 enum pci_obff_signal_type {
846 PCI_EXP_OBFF_SIGNAL_L0,
847 PCI_EXP_OBFF_SIGNAL_ALWAYS,
848 };
849 int pci_enable_obff( struct pci_dev *dev, enum pci_obff_signal_type);
850 void pci_disable_obff( struct pci_dev *dev);
851
852 bool pci_ltr_supported( struct pci_dev *dev);
853 int pci_enable_ltr( struct pci_dev *dev);
854 void pci_disable_ltr( struct pci_dev *dev);
855 int pci_set_ltr( struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns);
856
857 /* For use by arch with custom probe code */
858 void set_pcie_port_type( struct pci_dev *pdev);
859 void set_pcie_hotplug_bridge( struct pci_dev *pdev);
860
861 /* Functions for PCI Hotplug drivers to use */
862 int pci_bus_find_capability( struct pci_bus *bus, unsigned int devfn, int cap);
863 #ifdef CONFIG_HOTPLUG
864 unsigned int pci_rescan_bus( struct pci_bus *bus);
865 #endif
866
867 /* Vital product data routines */
868 ssize_t pci_read_vpd( struct pci_dev *dev, loff_t pos, size_t count, void *buf);
869 ssize_t pci_write_vpd( struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
870 int pci_vpd_truncate( struct pci_dev *dev, size_t size);
871
872 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
873 void pci_bus_assign_resources( const struct pci_bus *bus);
874 void pci_bus_size_bridges( struct pci_bus *bus);
875 int pci_claim_resource( struct pci_dev *, int);
876 void pci_assign_unassigned_resources( void);
877 void pci_assign_unassigned_bridge_resources( struct pci_dev *bridge);
878 void pdev_enable_device( struct pci_dev *);
879 void pdev_sort_resources( struct pci_dev *, struct resource_list *);
880 int pci_enable_resources( struct pci_dev *, int mask);
881 void pci_fixup_irqs(u8 (*)( struct pci_dev *, u8 *),
882 int (*)( struct pci_dev *, u8, u8));
883 #define HAVE_PCI_REQ_REGIONS 2
884 int __must_check pci_request_regions( struct pci_dev *, const char *);
885 int __must_check pci_request_regions_exclusive( struct pci_dev *, const char *);
886 void pci_release_regions( struct pci_dev *);
887 int __must_check pci_request_region( struct pci_dev *, int, const char *);
888 int __must_check pci_request_region_exclusive( struct pci_dev *, int, const char *);
889 void pci_release_region( struct pci_dev *, int);
890 int pci_request_selected_regions( struct pci_dev *, int, const char *);
891 int pci_request_selected_regions_exclusive( struct pci_dev *, int, const char *);
892 void pci_release_selected_regions( struct pci_dev *, int);
893
894 /* drivers/pci/bus.c */
895 void pci_bus_add_resource( struct pci_bus *bus, struct resource *res, unsigned int flags);
896 struct resource *pci_bus_resource_n( const struct pci_bus *bus, int n);
897 void pci_bus_remove_resources( struct pci_bus *bus);
898
899 #define pci_bus_for_each_resource(bus, res, i) \
900 for (i = 0; \
901 (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
902 i++)
903
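A sketch of the iterator macro above; pci_bus_resource_n() may yield NULL for unused slots, so the loop body checks res before using it.

#include <linux/pci.h>

static void example_dump_bus_resources(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	pci_bus_for_each_resource(bus, res, i) {
		if (res)
			printk(KERN_DEBUG "bus resource %d: %pR\n", i, res);
	}
}
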
904 int __must_check pci_bus_alloc_resource( struct pci_bus *bus,
905 struct resource *res, resource_size_t size,
906 resource_size_t align, resource_size_t min,
907 unsigned int type_mask,
908 resource_size_t (*alignf)( void *,
909 const struct resource *,
910 resource_size_t,
911 resource_size_t),
912 void *alignf_data);
913 void pci_enable_bridges( struct pci_bus *bus);
914
915 /* Proper probing supporting hot-pluggable devices */
916 int __must_check __pci_register_driver( struct pci_driver *, struct module *,
917 const char *mod_name);
918
919 /*
920 * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded
921 */
922 #define pci_register_driver(driver) \
923 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
924
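A minimal registration sketch using the macro above; every example_* name and the device ID are hypothetical, and the probe/remove bodies do only the bare minimum.

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical IDs; a real driver would list its hardware here. */
static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int __devinit example_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void __devexit example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_pci_driver = {
	.name     = "example",
	.id_table = example_ids,
	.probe    = example_probe,
	.remove   = __devexit_p(example_remove),
};

static int __init example_init(void)
{
	return pci_register_driver(&example_pci_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
}
module_init(example_init);
module_exit(example_exit);
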
925 void pci_unregister_driver( struct pci_driver *dev);
926 void pci_remove_behind_bridge( struct pci_dev *dev);
927 struct pci_driver *pci_dev_driver( const struct pci_dev *dev);
928 int pci_add_dynid( struct pci_driver *drv,
929 unsigned int vendor, unsigned int device,
930 unsigned int subvendor, unsigned int subdevice,
931 unsigned int class, unsigned int class_mask,
932 unsigned long driver_data);
933 const struct pci_device_id *pci_match_id( const struct pci_device_id *ids,
934 struct pci_dev *dev);
935 int pci_scan_bridge( struct pci_bus *bus, struct pci_dev *dev, int max,
936 int pass);
937
938 void pci_walk_bus( struct pci_bus *top, int (*cb)( struct pci_dev *, void *),
939 void *userdata);
940 int pci_cfg_space_size_ext( struct pci_dev *dev);
941 int pci_cfg_space_size( struct pci_dev *dev);
942 unsigned char pci_bus_max_busnr( struct pci_bus *bus);
943
944 #define PCI_VGA_STATE_CHANGE_BRIDGE ( 1 << 0)
945 #define PCI_VGA_STATE_CHANGE_DECODES ( 1 << 1)
946
947 int pci_set_vga_state( struct pci_dev *pdev, bool decode,
948 unsigned int command_bits, u32 flags);
949 /* kmem_cache-style wrappers around pci_alloc_consistent() */
950
951 #include <linux/pci-dma.h>
952 #include <linux/dmapool.h>
953
954 #define pci_pool dma_pool
955 #define pci_pool_create(name, pdev, size, align, allocation) \
956 dma_pool_create(name, &pdev->dev, size, align, allocation)
957 #define pci_pool_destroy(pool) dma_pool_destroy(pool)
958 #define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
959 #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
960
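A sketch of the pci_pool wrappers, which forward directly to the dma_pool API; the pool name, block size, and alignment below are invented.

#include <linux/pci.h>

static void example_pool_use(struct pci_dev *pdev)
{
	struct pci_pool *pool;
	dma_addr_t handle;
	void *vaddr;

	/* 64-byte blocks, 64-byte aligned, no boundary restriction */
	pool = pci_pool_create("example", pdev, 64, 64, 0);
	if (!pool)
		return;
	vaddr = pci_pool_alloc(pool, GFP_KERNEL, &handle);
	if (vaddr)
		pci_pool_free(pool, vaddr, handle);
	pci_pool_destroy(pool);
}
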
961 enum pci_dma_burst_strategy {
962 PCI_DMA_BURST_INFINITY, /* make bursts as large as possible,
963 strategy_parameter is N/A */
964 PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
965 byte boundary */
966 PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
967 strategy_parameter byte boundaries */
968 };
969
970 struct msix_entry {
971 u32 vector; /* written by the kernel with the allocated vector */
972 u16 entry; /* set by the driver to select a table entry */
973 };
974
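A sketch of msix_entry in use, assuming CONFIG_PCI_MSI; the vector count of four is arbitrary and the caller is assumed to supply the entry array.

#include <linux/pci.h>

static int example_setup_msix(struct pci_dev *pdev,
			      struct msix_entry *ent)
{
	int i;

	for (i = 0; i < 4; i++)
		ent[i].entry = i;	/* driver picks table entries */
	/* 0 on success, a positive available count, or a negative errno */
	return pci_enable_msix(pdev, ent, 4);
}
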
975
976 #ifndef CONFIG_PCI_MSI
977 static inline int pci_enable_msi_block( struct pci_dev *dev, unsigned int nvec)
978 {
979 return -1;
980 }
981
982 static inline void pci_msi_shutdown( struct pci_dev *dev)
983 { }
984 static inline void pci_disable_msi( struct pci_dev *dev)
985 { }
986
987 static inline int pci_msix_table_size( struct pci_dev *dev)
988 {
989 return 0;
990 }
991 static inline int pci_enable_msix( struct pci_dev *dev,
992 struct msix_entry *entries, int nvec)
993 {
994 return -1;
995 }
996
997 static inline void pci_msix_shutdown( struct pci_dev *dev)
998 { }
999 static inline void pci_disable_msix( struct pci_dev *dev)
1000 { }
1001
1002 static inline void msi_remove_pci_irq_vectors( struct pci_dev *dev)
1003 { }
1004
1005 static inline void pci_restore_msi_state( struct pci_dev *dev)
1006 { }
1007 static inline int pci_msi_enabled( void)
1008 {
1009 return 0;
1010 }
1011 #else
1012 extern int pci_enable_msi_block( struct pci_dev *dev, unsigned int nvec);
1013 extern void pci_msi_shutdown( struct pci_dev *dev);
1014 extern void pci_disable_msi( struct pci_dev *dev);
1015 extern int pci_msix_table_size( struct pci_dev *dev);
1016 extern int pci_enable_msix( struct pci_dev *dev,
1017 struct msix_entry *entries, int nvec);
1018 extern void pci_msix_shutdown( struct pci_dev *dev);
1019 extern void pci_disable_msix( struct pci_dev *dev);
1020 extern void msi_remove_pci_irq_vectors( struct pci_dev *dev);
1021 extern void pci_restore_msi_state( struct pci_dev *dev);
1022 extern int pci_msi_enabled( void);
1023 #endif
1024
1025 #ifdef CONFIG_PCIEPORTBUS
1026 extern bool pcie_ports_disabled;
1027 extern bool pcie_ports_auto;
1028 #else
1029 #define pcie_ports_disabled true
1030 #define pcie_ports_auto false
1031 #endif
1032
1033 #ifndef CONFIG_PCIEASPM
1034 static inline int pcie_aspm_enabled( void) { return 0; }
1035 static inline bool pcie_aspm_support_enabled( void) { return false; }
1036 #else
1037 extern int pcie_aspm_enabled( void);
1038 extern bool pcie_aspm_support_enabled( void);
1039 #endif
1040
1041 #ifdef CONFIG_PCIEAER
1042 void pci_no_aer( void);
1043 bool pci_aer_available( void);
1044 #else
1045 static inline void pci_no_aer( void) { }
1046 static inline bool pci_aer_available( void) { return false; }
1047 #endif
1048
1049 #ifndef CONFIG_PCIE_ECRC
1050 static inline void pcie_set_ecrc_checking( struct pci_dev *dev)
1051 {
1052 return;
1053 }
1054 static inline void pcie_ecrc_get_policy( char *str) { }
1055 #else
1056 extern void pcie_set_ecrc_checking( struct pci_dev *dev);
1057 extern void pcie_ecrc_get_policy( char *str);
1058 #endif
1059
1060 #define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1)
1061
1062 #ifdef CONFIG_HT_IRQ
1063 /* The functions a driver should call */
1064 int ht_create_irq( struct pci_dev *dev, int idx);
1065 void ht_destroy_irq( unsigned int irq);
1066 #endif /* CONFIG_HT_IRQ */
1067
1068 extern void pci_block_user_cfg_access( struct pci_dev *dev);
1069 extern void pci_unblock_user_cfg_access( struct pci_dev *dev);
1070
1071 /*
1072 * PCI domain support. Sometimes called a PCI segment (e.g. by ACPI),
1073 * a PCI domain is defined to be a set of PCI buses which share
1074 * configuration space.
1075 */
1076 #ifdef CONFIG_PCI_DOMAINS
1077 extern int pci_domains_supported;
1078 #else
1079 enum { pci_domains_supported = 0 };
1080 static inline int pci_domain_nr( struct pci_bus *bus)
1081 {
1082 return 0;
1083 }
1084
1085 static inline int pci_proc_domain( struct pci_bus *bus)
1086 {
1087 return 0;
1088 }
1089 #endif /* CONFIG_PCI_DOMAINS */
1090
1091 /* some architectures require additional setup to direct VGA traffic */
1092 typedef int (*arch_set_vga_state_t)( struct pci_dev *pdev, bool decode,
1093 unsigned int command_bits, u32 flags);
1094 extern void pci_register_set_vga_state(arch_set_vga_state_t func);
1095
1096 #else /* CONFIG_PCI is not enabled */
1097
1098 /*
1099 * If the system does not have PCI, clearly these return errors. Define
1100 * these as simple inline functions to avoid hair in drivers.
1101 */
1102
1103 #define _PCI_NOP(o, s, t) \
1104 static inline int pci_##o##_config_##s( struct pci_dev *dev, \
1105 int where, t val) \
1106 { return PCIBIOS_FUNC_NOT_SUPPORTED; }
1107
1108 #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \
1109 _PCI_NOP(o, word, u16 x) \
1110 _PCI_NOP(o, dword, u32 x)
1111 _PCI_NOP_ALL(read, *)
1112 _PCI_NOP_ALL(write,)
1113
1114 static inline struct pci_dev *pci_get_device( unsigned int vendor,
1115 unsigned int device,
1116 struct pci_dev *from)
1117 {
1118 return NULL;
1119 }
1120
1121 static inline struct pci_dev *pci_get_subsys( unsigned int vendor,
1122 unsigned int device,
1123 unsigned int ss_vendor,
1124 unsigned int ss_device,
1125 struct pci_dev *from)
1126 {
1127 return NULL;
1128 }
1129
1130 static inline struct pci_dev *pci_get_class( unsigned int class,
1131 struct pci_dev *from)
1132 {
1133 return NULL;
1134 }
1135
1136 #define pci_dev_present(ids) ( 0)
1137 #define no_pci_devices() ( 1)
1138 #define pci_dev_put(dev) do { } while ( 0)
1139
1140 static inline void pci_set_master( struct pci_dev *dev)
1141 { }
1142
1143 static inline int pci_enable_device( struct pci_dev *dev)
1144 {
1145 return -EIO;
1146 }
1147
1148 static inline void pci_disable_device( struct pci_dev *dev)
1149 { }
1150
1151 static inline int pci_set_dma_mask( struct pci_dev *dev, u64 mask)
1152 {
1153 return -EIO;
1154 }
1155
1156 static inline int pci_set_consistent_dma_mask( struct pci_dev *dev, u64 mask)
1157 {
1158 return -EIO;
1159 }
1160
1161 static inline int pci_set_dma_max_seg_size( struct pci_dev *dev,
1162 unsigned int size)
1163 {
1164 return -EIO;
1165 }
1166
1167 static inline int pci_set_dma_seg_boundary( struct pci_dev *dev,
1168 unsigned long mask)
1169 {
1170 return -EIO;
1171 }
1172
1173 static inline int pci_assign_resource( struct pci_dev *dev, int i)
1174 {
1175 return -EBUSY;
1176 }
1177
1178 static inline int __pci_register_driver( struct pci_driver *drv,
1179 struct module *owner)
1180 {
1181 return 0;
1182 }
1183
1184 static inline int pci_register_driver( struct pci_driver *drv)
1185 {
1186 return 0;
1187 }
1188
1189 static inline void pci_unregister_driver( struct pci_driver *drv)
1190 { }
1191
1192 static inline int pci_find_capability( struct pci_dev *dev, int cap)
1193 {
1194 return 0;
1195 }
1196
1197 static inline int pci_find_next_capability( struct pci_dev *dev, u8 pos,
1198 int cap)
1199 {
1200 return 0;
1201 }
1202
1203 static inline int pci_find_ext_capability( struct pci_dev *dev, int cap)
1204 {
1205 return 0;
1206 }
1207
1208 /* Power management related routines */
1209 static inline int pci_save_state( struct pci_dev *dev)
1210 {
1211 return 0;
1212 }
1213
1214 static inline void pci_restore_state( struct pci_dev *dev)
1215 { }
1216
1217 static inline int pci_set_power_state( struct pci_dev *dev, pci_power_t state)
1218 {
1219 return 0;
1220 }
1221
1222 static inline int pci_wake_from_d3( struct pci_dev *dev, bool enable)
1223 {
1224 return 0;
1225 }
1226
1227 static inline pci_power_t pci_choose_state( struct pci_dev *dev,
1228 pm_message_t state)
1229 {
1230 return PCI_D0;
1231 }
1232
1233 static inline int pci_enable_wake( struct pci_dev *dev, pci_power_t state,
1234 int enable)
1235 {
1236 return 0;
1237 }
1238
1239 static inline void pci_enable_ido( struct pci_dev *dev, unsigned long type)
1240 {
1241 }
1242
1243 static inline void pci_disable_ido( struct pci_dev *dev, unsigned long type)
1244 {
1245 }
1246
1247 static inline int pci_enable_obff( struct pci_dev *dev, unsigned long type)
1248 {
1249 return 0;
1250 }
1251
1252 static inline void pci_disable_obff( struct pci_dev *dev)
1253 {
1254 }
1255
1256 static inline int pci_request_regions( struct pci_dev *dev, const char *res_name)
1257 {
1258 return -EIO;
1259 }
1260
1261 static inline void pci_release_regions( struct pci_dev *dev)
1262 { }
1263
1264 #define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while ( 0)
1265
1266 static inline void pci_block_user_cfg_access( struct pci_dev *dev)
1267 { }
1268
1269 static inline void pci_unblock_user_cfg_access( struct pci_dev *dev)
1270 { }
1271
1272 static inline struct pci_bus *pci_find_next_bus( const struct pci_bus *from)
1273 { return NULL; }
1274
1275 static inline struct pci_dev *pci_get_slot( struct pci_bus *bus,
1276 unsigned int devfn)
1277 { return NULL; }
1278
1279 static inline struct pci_dev *pci_get_bus_and_slot( unsigned int bus,
1280 unsigned int devfn)
1281 { return NULL; }
1282
1283 static inline int pci_domain_nr( struct pci_bus *bus)
1284 { return 0; }
1285
1286 #define dev_is_pci(d) (false)
1287 #define dev_is_pf(d) (false)
1288 #define dev_num_vf(d) ( 0)
1289 #endif /* CONFIG_PCI */
1290
1291 /* Include architecture-dependent settings and functions */
1292
1293 #include <asm/pci.h>
1294
1295 #ifndef PCIBIOS_MAX_MEM_32
1296 #define PCIBIOS_MAX_MEM_32 (-1)
1297 #endif
1298
1299 /* these helpers provide future and backwards compatibility
1300 * for accessing popular PCI BAR info */
1301 #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
1302 #define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
1303 #define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
1304 #define pci_resource_len(dev,bar) \
1305 ((pci_resource_start((dev), (bar)) == 0 && \
1306 pci_resource_end((dev), (bar)) == \
1307 pci_resource_start((dev), (bar))) ? 0 : \
1308 \
1309 (pci_resource_end((dev), (bar)) - \
1310 pci_resource_start((dev), (bar)) + 1))
1311
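A sketch of the BAR helpers above, mapping BAR 0 only if it is a memory resource; pci_resource_len() already yields 0 for an unimplemented BAR.

#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *example_map_bar0(struct pci_dev *pdev)
{
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
		return NULL;
	return ioremap(pci_resource_start(pdev, 0),
		       pci_resource_len(pdev, 0));
}
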
1312 /* Similar to the helpers above, these manipulate per-pci_dev
1313 * driver-specific data. They are really just wrappers around
1314 * the corresponding generic device structure functions.
1315 */
1316 static inline void *pci_get_drvdata( struct pci_dev *pdev)
1317 {
1318 return dev_get_drvdata(&pdev->dev);
1319 }
1320
1321 static inline void pci_set_drvdata( struct pci_dev *pdev, void *data)
1322 {
1323 dev_set_drvdata(&pdev->dev, data);
1324 }
1325
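A sketch of the drvdata wrappers, pairing allocation at probe time with retrieval at remove time; struct example_priv and both function names are invented.

#include <linux/pci.h>
#include <linux/slab.h>

struct example_priv {
	void __iomem *regs;
};

static int example_attach(struct pci_dev *pdev)
{
	struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	pci_set_drvdata(pdev, priv);	/* stash per-device state */
	return 0;
}

static void example_detach(struct pci_dev *pdev)
{
	struct example_priv *priv = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);
	kfree(priv);
}
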
1326 /* If you want to know what to call your pci_dev, ask this function.
1327 * Again, it's a wrapper around the generic device.
1328 */
1329 static inline const char *pci_name( const struct pci_dev *pdev)
1330 {
1331 return dev_name(&pdev->dev);
1332 }
1333
1334
1335 /* Some archs don't want to expose struct resource to userland as-is
1336 * in sysfs and /proc
1337 */
1338 #ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER
1339 static inline void pci_resource_to_user( const struct pci_dev *dev, int bar,
1340 const struct resource *rsrc, resource_size_t *start,
1341 resource_size_t *end)
1342 {
1343 *start = rsrc->start;
1344 *end = rsrc->end;
1345 }
1346 #endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
1347
1348
1349 /*
1350 * The world is not perfect and supplies us with broken PCI devices.
1351 * For at least some of these bugs we need a work-around, so both
1352 * generic (drivers/pci/quirks.c) and per-architecture code can define
1353 * fixup hooks to be called for particular buggy devices.
1354 */
1355
1356 struct pci_fixup {
1357 u16 vendor, device; /* You can use PCI_ANY_ID here of course */
1358 void (*hook)( struct pci_dev *dev);
1359 };
1360
1361 enum pci_fixup_pass {
1362 pci_fixup_early, /* Before probing BARs */
1363 pci_fixup_header, /* After reading configuration header */
1364 pci_fixup_final, /* Final phase of device fixups */
1365 pci_fixup_enable, /* pci_enable_device() time */
1366 pci_fixup_resume, /* pci_device_resume() */
1367 pci_fixup_suspend, /* pci_device_suspend() */
1368 pci_fixup_resume_early, /* pci_device_resume_early() */
1369 };
1370
1371 /* Anonymous variables would be nice... */
1372 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, hook) \
1373 static const struct pci_fixup __pci_fixup_##name __used \
1374 __attribute__((__section__(#section))) = { vendor, device, hook };
1375 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
1376 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
1377 vendor##device##hook, vendor, device, hook)
1378 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
1379 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
1380 vendor##device##hook, vendor, device, hook)
1381 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
1382 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
1383 vendor##device##hook, vendor, device, hook)
1384 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
1385 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
1386 vendor##device##hook, vendor, device, hook)
1387 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
1388 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1389 resume##vendor##device##hook, vendor, device, hook)
1390 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
1391 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1392 resume_early##vendor##device##hook, vendor, device, hook)
1393 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
1394 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1395 suspend##vendor##device##hook, vendor, device, hook)
1396
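A sketch of a header-stage quirk declared with the macros above; the vendor/device pair and the "fix" itself are invented for illustration.

#include <linux/pci.h>

static void __devinit example_quirk(struct pci_dev *dev)
{
	dev_warn(&dev->dev, "applying example quirk\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1234, example_quirk);
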
1397 #ifdef CONFIG_PCI_QUIRKS
1398 void pci_fixup_device( enum pci_fixup_pass pass, struct pci_dev *dev);
1399 #else
1400 static inline void pci_fixup_device( enum pci_fixup_pass pass,
1401 struct pci_dev *dev) {}
1402 #endif
1403
1404 void __iomem *pcim_iomap( struct pci_dev *pdev, int bar, unsigned long maxlen);
1405 void pcim_iounmap( struct pci_dev *pdev, void __iomem *addr);
1406 void __iomem * const *pcim_iomap_table( struct pci_dev *pdev);
1407 int pcim_iomap_regions( struct pci_dev *pdev, u16 mask, const char *name);
1408 int pcim_iomap_regions_request_all( struct pci_dev *pdev, u16 mask,
1409 const char *name);
1410 void pcim_iounmap_regions( struct pci_dev *pdev, u16 mask);
1411
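A sketch of a probe routine built on the managed pcim_* helpers above; devres releases everything on driver detach, so no explicit unwind path is needed. All names are hypothetical.

#include <linux/pci.h>

static int example_managed_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
{
	void __iomem *regs;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;
	err = pcim_iomap_regions(pdev, 1 << 0, "example"); /* BAR 0 only */
	if (err)
		return err;
	regs = pcim_iomap_table(pdev)[0];
	(void)regs;	/* hardware setup would start here */
	return 0;
}
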
1412 extern int pci_pci_problems;
1413 #define PCIPCI_FAIL 1 /* No PCI PCI DMA */
1414 #define PCIPCI_TRITON 2
1415 #define PCIPCI_NATOMA 4
1416 #define PCIPCI_VIAETBF 8
1417 #define PCIPCI_VSFX 16
1418 #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
1419 #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
1420
1421 extern unsigned long pci_cardbus_io_size;
1422 extern unsigned long pci_cardbus_mem_size;
1423 extern u8 __devinitdata pci_dfl_cache_line_size;
1424 extern u8 pci_cache_line_size;
1425
1426 extern unsigned long pci_hotplug_io_size;
1427 extern unsigned long pci_hotplug_mem_size;
1428
1429 int pcibios_add_platform_entries( struct pci_dev *dev);
1430 void pcibios_disable_device( struct pci_dev *dev);
1431 int pcibios_set_pcie_reset_state( struct pci_dev *dev,
1432 enum pcie_reset_state state);
1433
1434 #ifdef CONFIG_PCI_MMCONFIG
1435 extern void __init pci_mmcfg_early_init( void);
1436 extern void __init pci_mmcfg_late_init( void);
1437 #else
1438 static inline void pci_mmcfg_early_init( void) { }
1439 static inline void pci_mmcfg_late_init( void) { }
1440 #endif
1441
1442 int pci_ext_cfg_avail( struct pci_dev *dev);
1443
1444 void __iomem *pci_ioremap_bar( struct pci_dev *pdev, int bar);
1445
1446 #ifdef CONFIG_PCI_IOV
1447 extern int pci_enable_sriov( struct pci_dev *dev, int nr_virtfn);
1448 extern void pci_disable_sriov( struct pci_dev *dev);
1449 extern irqreturn_t pci_sriov_migration( struct pci_dev *dev);
1450 extern int pci_num_vf( struct pci_dev *dev);
1451 #else
1452 static inline int pci_enable_sriov( struct pci_dev *dev, int nr_virtfn)
1453 {
1454 return -ENODEV;
1455 }
1456 static inline void pci_disable_sriov( struct pci_dev *dev)
1457 {
1458 }
1459 static inline irqreturn_t pci_sriov_migration( struct pci_dev *dev)
1460 {
1461 return IRQ_NONE;
1462 }
1463 static inline int pci_num_vf( struct pci_dev *dev)
1464 {
1465 return 0;
1466 }
1467 #endif
1468
1469 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
1470 extern void pci_hp_create_module_link( struct pci_slot *pci_slot);
1471 extern void pci_hp_remove_module_link( struct pci_slot *pci_slot);
1472 #endif
1473
1474 /**
1475 * pci_pcie_cap - get the saved PCIe capability offset
1476 * @dev: PCI device
1477 *
1478 * The PCIe capability offset is calculated at PCI device initialization
1479 * time and saved in the data structure. This function returns the saved
1480 * offset. Using it instead of pci_find_capability() avoids an unnecessary
1481 * search of the PCI configuration space. If you need to recompute the
1482 * PCIe capability offset from the raw device for some reason, use
1483 * pci_find_capability() instead.
1484 */
1485 static inline int pci_pcie_cap( struct pci_dev *dev)
1486 {
1487 return dev->pcie_cap;
1488 }
1489
1490 /**
1491 * pci_is_pcie - check if the PCI device is PCI Express capable
1492 * @dev: PCI device
1493 *
1494 * Returns true if the PCI device is PCI Express capable, false otherwise.
1495 */
1496 static inline bool pci_is_pcie( struct pci_dev *dev)
1497 {
1498 return !!pci_pcie_cap(dev);
1499 }
1500
1501 void pci_request_acs( void);
1502
1503
1504 #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
1505 #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
1506
1507 /* Large Resource Data Type Tag Item Names */
1508 #define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
1509 #define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */
1510 #define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */
1511
1512 #define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
1513 #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
1514 #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
1515
1516 /* Small Resource Data Type Tag Item Names */
1517 #define PCI_VPD_STIN_END 0x78 /* End */
1518
1519 #define PCI_VPD_SRDT_END PCI_VPD_STIN_END
1520
1521 #define PCI_VPD_SRDT_TIN_MASK 0x78
1522 #define PCI_VPD_SRDT_LEN_MASK 0x07
1523
1524 #define PCI_VPD_LRDT_TAG_SIZE 3
1525 #define PCI_VPD_SRDT_TAG_SIZE 1
1526
1527 #define PCI_VPD_INFO_FLD_HDR_SIZE 3
1528
1529 #define PCI_VPD_RO_KEYWORD_PARTNO "PN"
1530 #define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
1531 #define PCI_VPD_RO_KEYWORD_VENDOR0 "V0"
1532 #define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
1533
1534 /**
1535 * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
1536 * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
1537 *
1538 * Returns the extracted Large Resource Data Type length.
1539 */
1540 static inline u16 pci_vpd_lrdt_size( const u8 *lrdt)
1541 {
1542 return (u16)lrdt[ 1] + ((u16)lrdt[ 2] << 8);
1543 }
1544
1545 /**
1546 * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
1547 * @srdt: Pointer to the beginning of the Small Resource Data Type tag
1548 *
1549 * Returns the extracted Small Resource Data Type length.
1550 */
1551 static inline u8 pci_vpd_srdt_size( const u8 *srdt)
1552 {
1553 return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
1554 }
1555
1556 /**
1557 * pci_vpd_info_field_size - Extracts the information field length
1558 * @info_field: Pointer to the beginning of an information field header
1559 *
1560 * Returns the extracted information field length.
1561 */
1562 static inline u8 pci_vpd_info_field_size( const u8 *info_field)
1563 {
1564 return info_field[ 2];
1565 }
1566
1567 /**
1568 * pci_vpd_find_tag - Locates the Resource Data Type tag provided
1569 * @buf: Pointer to buffered vpd data
1570 * @off: The offset into the buffer at which to begin the search
1571 * @len: The length of the vpd buffer
1572 * @rdt: The Resource Data Type to search for
1573 *
1574 * Returns the index where the Resource Data Type was found or
1575 * -ENOENT otherwise.
1576 */
1577 int pci_vpd_find_tag( const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
1578
1579 /**
1580 * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
1581 * @buf: Pointer to buffered vpd data
1582 * @off: The offset into the buffer at which to begin the search
1583 * @len: The length of the buffer area, relative to off, in which to search
1584 * @kw: The keyword to search for
1585 *
1586 * Returns the index where the information field keyword was found or
1587 * -ENOENT otherwise.
1588 */
1589 int pci_vpd_find_info_keyword( const u8 *buf, unsigned int off,
1590 unsigned int len, const char *kw);
1591
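A sketch tying the VPD helpers together: find the read-only section, then the part-number keyword inside it. Buffer acquisition (e.g. via pci_read_vpd()) is assumed to have happened already, and the function name is invented.

#include <linux/pci.h>

static int example_find_partno(const u8 *vpd, unsigned int len)
{
	int ro, kw;

	ro = pci_vpd_find_tag(vpd, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (ro < 0)
		return ro;
	kw = pci_vpd_find_info_keyword(vpd, ro + PCI_VPD_LRDT_TAG_SIZE,
				       pci_vpd_lrdt_size(&vpd[ro]),
				       PCI_VPD_RO_KEYWORD_PARTNO);
	return kw;	/* index of the "PN" field header, or -ENOENT */
}
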
1592 #endif /* __KERNEL__ */
1593 #endif /* LINUX_PCI_H */