Error Trace
[Home]
Bug # 18
Show/hide error trace Error trace
-__BLAST_initialize_/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-3.18-rc1.tar.xz--X--106_1a/linux-3.18-rc1.tar.xz/csd_deg_dscv/2290/dscv_tempdir/dscv/rcv/106_1a/main-ldv_main1_sequence_infinite_withcheck_stateful/preprocess/1-cpp/drivers/staging/dgnc/dgnc_cls.o.i() { 94 -dgnc_cls_ops.tasklet = &(cls_tasklet); dgnc_cls_ops.intr = &(cls_intr); dgnc_cls_ops.uart_init = &(cls_uart_init); dgnc_cls_ops.uart_off = &(cls_uart_off); dgnc_cls_ops.drain = &(cls_drain); dgnc_cls_ops.param = &(cls_param); dgnc_cls_ops.vpd = &(cls_vpd); dgnc_cls_ops.assert_modem_signals = &(cls_assert_modem_signals); dgnc_cls_ops.flush_uart_write = &(cls_flush_uart_write); dgnc_cls_ops.flush_uart_read = &(cls_flush_uart_read); dgnc_cls_ops.disable_receiver = &(cls_disable_receiver); dgnc_cls_ops.enable_receiver = &(cls_enable_receiver); dgnc_cls_ops.send_break = &(cls_send_break); dgnc_cls_ops.send_start_character = &(cls_send_start_character); dgnc_cls_ops.send_stop_character = &(cls_send_stop_character); dgnc_cls_ops.copy_data_from_queue_to_uart = &(cls_copy_data_from_queue_to_uart); dgnc_cls_ops.get_uart_bytes_left = &(cls_get_uart_bytes_left); dgnc_cls_ops.send_immediate_char = &(cls_send_immediate_char); return ; } -__BLAST_initialize_/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-3.18-rc1.tar.xz--X--106_1a/linux-3.18-rc1.tar.xz/csd_deg_dscv/2290/dscv_tempdir/dscv/rcv/106_1a/main-ldv_main1_sequence_infinite_withcheck_stateful/preprocess/1-cpp/drivers/staging/dgnc/dgnc_driver.o.i() { 78 -debug = 0; rawreadok = 1; trcbuf_size = 1048576; dgnc_BoardFops.owner = &(__this_module); dgnc_BoardFops.llseek = 0; dgnc_BoardFops.read = 0; dgnc_BoardFops.write = 0; dgnc_BoardFops.aio_read = 0; dgnc_BoardFops.aio_write = 0; dgnc_BoardFops.read_iter = 0; dgnc_BoardFops.write_iter = 0; dgnc_BoardFops.iterate = 0; dgnc_BoardFops.poll = 0; dgnc_BoardFops.unlocked_ioctl = &(dgnc_mgmt_ioctl); dgnc_BoardFops.compat_ioctl = 0; dgnc_BoardFops.mmap = 0; dgnc_BoardFops.open 
= &(dgnc_mgmt_open); dgnc_BoardFops.flush = 0; dgnc_BoardFops.release = &(dgnc_mgmt_close); dgnc_BoardFops.fsync = 0; dgnc_BoardFops.aio_fsync = 0; dgnc_BoardFops.fasync = 0; dgnc_BoardFops.lock = 0; dgnc_BoardFops.sendpage = 0; dgnc_BoardFops.get_unmapped_area = 0; dgnc_BoardFops.check_flags = 0; dgnc_BoardFops.flock = 0; dgnc_BoardFops.splice_write = 0; dgnc_BoardFops.splice_read = 0; dgnc_BoardFops.setlease = 0; dgnc_BoardFops.fallocate = 0; dgnc_BoardFops.show_fdinfo = 0; dgnc_global_lock.__annonCompField19.rlock.raw_lock.__annonCompField7.head_tail = 0; dgnc_global_lock.__annonCompField19.rlock.magic = 3735899821; dgnc_global_lock.__annonCompField19.rlock.owner_cpu = 4294967295; dgnc_global_lock.__annonCompField19.rlock.owner = -1; dgnc_global_lock.__annonCompField19.rlock.dep_map.key = 0; dgnc_global_lock.__annonCompField19.rlock.dep_map.class_cache[ 0 ] = 0; dgnc_global_lock.__annonCompField19.rlock.dep_map.class_cache[ 1 ] = 0; dgnc_global_lock.__annonCompField19.rlock.dep_map.name = "dgnc_global_lock"; dgnc_global_lock.__annonCompField19.rlock.dep_map.cpu = 0; dgnc_global_lock.__annonCompField19.rlock.dep_map.ip = 0; dgnc_poll_tick = 20; dgnc_poll_lock.__annonCompField19.rlock.raw_lock.__annonCompField7.head_tail = 0; dgnc_poll_lock.__annonCompField19.rlock.magic = 3735899821; dgnc_poll_lock.__annonCompField19.rlock.owner_cpu = 4294967295; dgnc_poll_lock.__annonCompField19.rlock.owner = -1; dgnc_poll_lock.__annonCompField19.rlock.dep_map.key = 0; dgnc_poll_lock.__annonCompField19.rlock.dep_map.class_cache[ 0 ] = 0; dgnc_poll_lock.__annonCompField19.rlock.dep_map.class_cache[ 1 ] = 0; dgnc_poll_lock.__annonCompField19.rlock.dep_map.name = "dgnc_poll_lock"; dgnc_poll_lock.__annonCompField19.rlock.dep_map.cpu = 0; dgnc_poll_lock.__annonCompField19.rlock.dep_map.ip = 0; dgnc_pci_tbl[ 0 ].vendor = 4431; dgnc_pci_tbl[ 0 ].device = 40; dgnc_pci_tbl[ 0 ].subvendor = 4294967295; dgnc_pci_tbl[ 0 ].subdevice = 4294967295; dgnc_pci_tbl[ 0 ].class = 0; dgnc_pci_tbl[ 0 
].class_mask = 0; dgnc_pci_tbl[ 0 ].driver_data = 0; dgnc_pci_tbl[ 1 ].vendor = 4431; dgnc_pci_tbl[ 1 ].device = 208; dgnc_pci_tbl[ 1 ].subvendor = 4294967295; dgnc_pci_tbl[ 1 ].subdevice = 4294967295; dgnc_pci_tbl[ 1 ].class = 0; dgnc_pci_tbl[ 1 ].class_mask = 0; dgnc_pci_tbl[ 1 ].driver_data = 1; dgnc_pci_tbl[ 2 ].vendor = 4431; dgnc_pci_tbl[ 2 ].device = 41; dgnc_pci_tbl[ 2 ].subvendor = 4294967295; dgnc_pci_tbl[ 2 ].subdevice = 4294967295; dgnc_pci_tbl[ 2 ].class = 0; dgnc_pci_tbl[ 2 ].class_mask = 0; dgnc_pci_tbl[ 2 ].driver_data = 2; dgnc_pci_tbl[ 3 ].vendor = 4431; dgnc_pci_tbl[ 3 ].device = 209; dgnc_pci_tbl[ 3 ].subvendor = 4294967295; dgnc_pci_tbl[ 3 ].subdevice = 4294967295; dgnc_pci_tbl[ 3 ].class = 0; dgnc_pci_tbl[ 3 ].class_mask = 0; dgnc_pci_tbl[ 3 ].driver_data = 3; dgnc_pci_tbl[ 4 ].vendor = 0; dgnc_pci_tbl[ 4 ].device = 0; dgnc_pci_tbl[ 4 ].subvendor = 0; dgnc_pci_tbl[ 4 ].subdevice = 0; dgnc_pci_tbl[ 4 ].class = 0; dgnc_pci_tbl[ 4 ].class_mask = 0; dgnc_pci_tbl[ 4 ].driver_data = 0; dgnc_Ids[ 0 ].name = "ClassicBoard 4 PCI"; dgnc_Ids[ 0 ].maxports = 4; dgnc_Ids[ 0 ].is_pci_express = 0; dgnc_Ids[ 1 ].name = "ClassicBoard 4 422 PCI"; dgnc_Ids[ 1 ].maxports = 4; dgnc_Ids[ 1 ].is_pci_express = 0; dgnc_Ids[ 2 ].name = "ClassicBoard 8 PCI"; dgnc_Ids[ 2 ].maxports = 8; dgnc_Ids[ 2 ].is_pci_express = 0; dgnc_Ids[ 3 ].name = "ClassicBoard 8 422 PCI"; dgnc_Ids[ 3 ].maxports = 8; dgnc_Ids[ 3 ].is_pci_express = 0; dgnc_Ids[ 4 ].name = "Neo 4 PCI"; dgnc_Ids[ 4 ].maxports = 4; dgnc_Ids[ 4 ].is_pci_express = 0; dgnc_Ids[ 5 ].name = "Neo 8 PCI"; dgnc_Ids[ 5 ].maxports = 8; dgnc_Ids[ 5 ].is_pci_express = 0; dgnc_Ids[ 6 ].name = "Neo 2 - DB9 Universal PCI"; dgnc_Ids[ 6 ].maxports = 2; dgnc_Ids[ 6 ].is_pci_express = 0; dgnc_Ids[ 7 ].name = "Neo 2 - DB9 Universal PCI - Powered Ring Indicator"; dgnc_Ids[ 7 ].maxports = 2; dgnc_Ids[ 7 ].is_pci_express = 0; dgnc_Ids[ 8 ].name = "Neo 2 - RJ45 Universal PCI"; dgnc_Ids[ 8 ].maxports = 2; dgnc_Ids[ 8 ].is_pci_express = 0; 
dgnc_Ids[ 9 ].name = "Neo 2 - RJ45 Universal PCI - Powered Ring Indicator"; dgnc_Ids[ 9 ].maxports = 2; dgnc_Ids[ 9 ].is_pci_express = 0; dgnc_Ids[ 10 ].name = "Neo 1 422 PCI"; dgnc_Ids[ 10 ].maxports = 1; dgnc_Ids[ 10 ].is_pci_express = 0; dgnc_Ids[ 11 ].name = "Neo 1 422/485 PCI"; dgnc_Ids[ 11 ].maxports = 1; dgnc_Ids[ 11 ].is_pci_express = 0; dgnc_Ids[ 12 ].name = "Neo 2 422/485 PCI"; dgnc_Ids[ 12 ].maxports = 2; dgnc_Ids[ 12 ].is_pci_express = 0; dgnc_Ids[ 13 ].name = "Neo 8 PCI Express"; dgnc_Ids[ 13 ].maxports = 8; dgnc_Ids[ 13 ].is_pci_express = 1; dgnc_Ids[ 14 ].name = "Neo 4 PCI Express"; dgnc_Ids[ 14 ].maxports = 4; dgnc_Ids[ 14 ].is_pci_express = 1; dgnc_Ids[ 15 ].name = "Neo 4 PCI Express RJ45"; dgnc_Ids[ 15 ].maxports = 4; dgnc_Ids[ 15 ].is_pci_express = 1; dgnc_Ids[ 16 ].name = "Neo 8 PCI Express RJ45"; dgnc_Ids[ 16 ].maxports = 8; dgnc_Ids[ 16 ].is_pci_express = 1; dgnc_Ids[ 17 ].name = 0; dgnc_Ids[ 17 ].maxports = 0; dgnc_Ids[ 17 ].is_pci_express = 0; dgnc_driver.node.next = 0; dgnc_driver.node.prev = 0; dgnc_driver.name = "dgnc"; dgnc_driver.id_table = &(dgnc_pci_tbl); dgnc_driver.probe = &(dgnc_init_one); dgnc_driver.remove = 0; dgnc_driver.suspend = 0; dgnc_driver.suspend_late = 0; dgnc_driver.resume_early = 0; dgnc_driver.resume = 0; dgnc_driver.shutdown = 0; dgnc_driver.sriov_configure = 0; dgnc_driver.err_handler = 0; dgnc_driver.driver.name = 0; dgnc_driver.driver.bus = 0; dgnc_driver.driver.owner = 0; dgnc_driver.driver.mod_name = 0; dgnc_driver.driver.suppress_bind_attrs = 0; dgnc_driver.driver.of_match_table = 0; dgnc_driver.driver.acpi_match_table = 0; dgnc_driver.driver.probe = 0; dgnc_driver.driver.remove = 0; dgnc_driver.driver.shutdown = 0; dgnc_driver.driver.suspend = 0; dgnc_driver.driver.resume = 0; dgnc_driver.driver.groups = 0; dgnc_driver.driver.pm = 0; dgnc_driver.driver.p = 0; dgnc_driver.dynids.lock.__annonCompField19.rlock.raw_lock.__annonCompField7.head_tail = 0; dgnc_driver.dynids.lock.__annonCompField19.rlock.magic = 0; 
dgnc_driver.dynids.lock.__annonCompField19.rlock.owner_cpu = 0; dgnc_driver.dynids.lock.__annonCompField19.rlock.owner = 0; dgnc_driver.dynids.lock.__annonCompField19.rlock.dep_map.key = 0; dgnc_driver.dynids.lock.__annonCompField19.rlock.dep_map.class_cache[ 0 ] = 0; dgnc_driver.dynids.lock.__annonCompField19.rlock.dep_map.class_cache[ 1 ] = 0; dgnc_driver.dynids.lock.__annonCompField19.rlock.dep_map.name = 0; dgnc_driver.dynids.lock.__annonCompField19.rlock.dep_map.cpu = 0; dgnc_driver.dynids.lock.__annonCompField19.rlock.dep_map.ip = 0; dgnc_driver.dynids.list.next = 0; dgnc_driver.dynids.list.prev = 0; dgnc_state_text[ 0 ] = "Board Failed"; dgnc_state_text[ 1 ] = "Board Found"; dgnc_state_text[ 2 ] = "Board READY"; return ; } -__BLAST_initialize_/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-3.18-rc1.tar.xz--X--106_1a/linux-3.18-rc1.tar.xz/csd_deg_dscv/2290/dscv_tempdir/dscv/rcv/106_1a/main-ldv_main1_sequence_infinite_withcheck_stateful/preprocess/1-cpp/drivers/staging/dgnc/dgnc_mgmt.o.i() { return ; } -__BLAST_initialize_/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-3.18-rc1.tar.xz--X--106_1a/linux-3.18-rc1.tar.xz/csd_deg_dscv/2290/dscv_tempdir/dscv/rcv/106_1a/main-ldv_main1_sequence_infinite_withcheck_stateful/preprocess/1-cpp/drivers/staging/dgnc/dgnc_neo.o.i() { 97 -dgnc_neo_ops.tasklet = &(neo_tasklet); dgnc_neo_ops.intr = &(neo_intr); dgnc_neo_ops.uart_init = &(neo_uart_init); dgnc_neo_ops.uart_off = &(neo_uart_off); dgnc_neo_ops.drain = &(neo_drain); dgnc_neo_ops.param = &(neo_param); dgnc_neo_ops.vpd = &(neo_vpd); dgnc_neo_ops.assert_modem_signals = &(neo_assert_modem_signals); dgnc_neo_ops.flush_uart_write = &(neo_flush_uart_write); dgnc_neo_ops.flush_uart_read = &(neo_flush_uart_read); dgnc_neo_ops.disable_receiver = &(neo_disable_receiver); dgnc_neo_ops.enable_receiver = &(neo_enable_receiver); dgnc_neo_ops.send_break = &(neo_send_break); dgnc_neo_ops.send_start_character = &(neo_send_start_character); 
dgnc_neo_ops.send_stop_character = &(neo_send_stop_character); dgnc_neo_ops.copy_data_from_queue_to_uart = &(neo_copy_data_from_queue_to_uart); dgnc_neo_ops.get_uart_bytes_left = &(neo_get_uart_bytes_left); dgnc_neo_ops.send_immediate_char = &(neo_send_immediate_char); dgnc_offset_table[ 0 ] = 1; dgnc_offset_table[ 1 ] = 2; dgnc_offset_table[ 2 ] = 4; dgnc_offset_table[ 3 ] = 8; dgnc_offset_table[ 4 ] = 16; dgnc_offset_table[ 5 ] = 32; dgnc_offset_table[ 6 ] = 64; dgnc_offset_table[ 7 ] = 128; return ; } -__BLAST_initialize_/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-3.18-rc1.tar.xz--X--106_1a/linux-3.18-rc1.tar.xz/csd_deg_dscv/2290/dscv_tempdir/dscv/rcv/106_1a/main-ldv_main1_sequence_infinite_withcheck_stateful/preprocess/1-cpp/drivers/staging/dgnc/dgnc_tty.o.i() { 91 -dgnc_TmpWriteSem.lock.raw_lock.__annonCompField7.head_tail = 0; dgnc_TmpWriteSem.lock.magic = 3735899821; dgnc_TmpWriteSem.lock.owner_cpu = 4294967295; dgnc_TmpWriteSem.lock.owner = -1; dgnc_TmpWriteSem.lock.dep_map.key = 0; dgnc_TmpWriteSem.lock.dep_map.class_cache[ 0 ] = 0; dgnc_TmpWriteSem.lock.dep_map.class_cache[ 1 ] = 0; dgnc_TmpWriteSem.lock.dep_map.name = "(dgnc_TmpWriteSem).lock"; dgnc_TmpWriteSem.lock.dep_map.cpu = 0; dgnc_TmpWriteSem.lock.dep_map.ip = 0; dgnc_TmpWriteSem.count = 1; dgnc_TmpWriteSem.wait_list.next = &(&(dgnc_TmpWriteSem))->wait_list; dgnc_TmpWriteSem.wait_list.prev = &(&(dgnc_TmpWriteSem))->wait_list; dgnc_digi_init.digi_flags = 128; dgnc_digi_init.digi_maxcps = 100; dgnc_digi_init.digi_maxchar = 50; dgnc_digi_init.digi_bufsize = 100; dgnc_digi_init.digi_onlen = 4; dgnc_digi_init.digi_offlen = 4; dgnc_digi_init.digi_onstr[ 0 ] = 27; dgnc_digi_init.digi_onstr[ 1 ] = 91; dgnc_digi_init.digi_onstr[ 2 ] = 53; dgnc_digi_init.digi_onstr[ 3 ] = 105; dgnc_digi_init.digi_onstr[ 4 ] = 0; dgnc_digi_init.digi_offstr[ 0 ] = 27; dgnc_digi_init.digi_offstr[ 1 ] = 91; dgnc_digi_init.digi_offstr[ 2 ] = 52; dgnc_digi_init.digi_offstr[ 3 ] = 105; 
dgnc_digi_init.digi_offstr[ 4 ] = 0; dgnc_digi_init.digi_term[ 0 ] = 97; dgnc_digi_init.digi_term[ 1 ] = 110; dgnc_digi_init.digi_term[ 2 ] = 115; dgnc_digi_init.digi_term[ 3 ] = 105; dgnc_digi_init.digi_term[ 4 ] = 0; dgnc_digi_init.digi_term[ 5 ] = 0; dgnc_digi_init.digi_term[ 6 ] = 0; dgnc_digi_init.digi_term[ 7 ] = 0; dgnc_digi_init.digi_term[ 8 ] = 0; dgnc_digi_init.digi_term[ 9 ] = 0; DgncDefaultTermios.c_iflag = 1280; DgncDefaultTermios.c_oflag = 5; DgncDefaultTermios.c_cflag = 3261; DgncDefaultTermios.c_lflag = 35387; DgncDefaultTermios.c_line = 0; DgncDefaultTermios.c_cc[ 0 ] = 3; DgncDefaultTermios.c_cc[ 1 ] = 28; DgncDefaultTermios.c_cc[ 2 ] = 127; DgncDefaultTermios.c_cc[ 3 ] = 21; DgncDefaultTermios.c_cc[ 4 ] = 4; DgncDefaultTermios.c_cc[ 5 ] = 0; DgncDefaultTermios.c_cc[ 6 ] = 0; DgncDefaultTermios.c_cc[ 7 ] = 0; DgncDefaultTermios.c_cc[ 8 ] = 0; DgncDefaultTermios.c_cc[ 9 ] = 0; DgncDefaultTermios.c_cc[ 10 ] = 0; DgncDefaultTermios.c_cc[ 11 ] = 0; DgncDefaultTermios.c_cc[ 12 ] = 0; DgncDefaultTermios.c_cc[ 13 ] = 0; DgncDefaultTermios.c_cc[ 14 ] = 0; DgncDefaultTermios.c_cc[ 15 ] = 0; DgncDefaultTermios.c_cc[ 16 ] = 0; DgncDefaultTermios.c_cc[ 17 ] = 0; DgncDefaultTermios.c_cc[ 18 ] = 0; DgncDefaultTermios.c_ispeed = 0; DgncDefaultTermios.c_ospeed = 0; dgnc_tty_ops.lookup = 0; dgnc_tty_ops.install = 0; dgnc_tty_ops.remove = 0; dgnc_tty_ops.open = &(dgnc_tty_open); dgnc_tty_ops.close = &(dgnc_tty_close); dgnc_tty_ops.shutdown = 0; dgnc_tty_ops.cleanup = 0; dgnc_tty_ops.write = &(dgnc_tty_write); dgnc_tty_ops.put_char = &(dgnc_tty_put_char); dgnc_tty_ops.flush_chars = &(dgnc_tty_flush_chars); dgnc_tty_ops.write_room = &(dgnc_tty_write_room); dgnc_tty_ops.chars_in_buffer = &(dgnc_tty_chars_in_buffer); dgnc_tty_ops.ioctl = &(dgnc_tty_ioctl); dgnc_tty_ops.compat_ioctl = 0; dgnc_tty_ops.set_termios = &(dgnc_tty_set_termios); dgnc_tty_ops.throttle = &(dgnc_tty_throttle); dgnc_tty_ops.unthrottle = &(dgnc_tty_unthrottle); dgnc_tty_ops.stop = &(dgnc_tty_stop); 
dgnc_tty_ops.start = &(dgnc_tty_start); dgnc_tty_ops.hangup = &(dgnc_tty_hangup); dgnc_tty_ops.break_ctl = &(dgnc_tty_send_break); dgnc_tty_ops.flush_buffer = &(dgnc_tty_flush_buffer); dgnc_tty_ops.set_ldisc = 0; dgnc_tty_ops.wait_until_sent = &(dgnc_tty_wait_until_sent); dgnc_tty_ops.send_xchar = &(dgnc_tty_send_xchar); dgnc_tty_ops.tiocmget = &(dgnc_tty_tiocmget); dgnc_tty_ops.tiocmset = &(dgnc_tty_tiocmset); dgnc_tty_ops.resize = 0; dgnc_tty_ops.set_termiox = 0; dgnc_tty_ops.get_icount = 0; dgnc_tty_ops.poll_init = 0; dgnc_tty_ops.poll_get_char = 0; dgnc_tty_ops.poll_put_char = 0; dgnc_tty_ops.proc_fops = 0; return ; } -__BLAST_initialize_/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-3.18-rc1.tar.xz--X--106_1a/linux-3.18-rc1.tar.xz/csd_deg_dscv/2290/dscv_tempdir/dscv/rcv/106_1a/main-ldv_main1_sequence_infinite_withcheck_stateful/preprocess/1-cpp/drivers/staging/dgnc/dgnc_sysfs.o.i() { 70 -driver_attr_version.attr.name = "version"; driver_attr_version.attr.mode = 256; driver_attr_version.attr.ignore_lockdep = 0; driver_attr_version.attr.key = 0; driver_attr_version.attr.skey.subkeys[ 0 ].__one_byte = 0; driver_attr_version.attr.skey.subkeys[ 1 ].__one_byte = 0; driver_attr_version.attr.skey.subkeys[ 2 ].__one_byte = 0; driver_attr_version.attr.skey.subkeys[ 3 ].__one_byte = 0; driver_attr_version.attr.skey.subkeys[ 4 ].__one_byte = 0; driver_attr_version.attr.skey.subkeys[ 5 ].__one_byte = 0; driver_attr_version.attr.skey.subkeys[ 6 ].__one_byte = 0; driver_attr_version.attr.skey.subkeys[ 7 ].__one_byte = 0; driver_attr_version.show = &(dgnc_driver_version_show); driver_attr_version.store = 0; driver_attr_boards.attr.name = "boards"; driver_attr_boards.attr.mode = 256; driver_attr_boards.attr.ignore_lockdep = 0; driver_attr_boards.attr.key = 0; driver_attr_boards.attr.skey.subkeys[ 0 ].__one_byte = 0; driver_attr_boards.attr.skey.subkeys[ 1 ].__one_byte = 0; driver_attr_boards.attr.skey.subkeys[ 2 ].__one_byte = 0; 
driver_attr_boards.attr.skey.subkeys[ 3 ].__one_byte = 0; driver_attr_boards.attr.skey.subkeys[ 4 ].__one_byte = 0; driver_attr_boards.attr.skey.subkeys[ 5 ].__one_byte = 0; driver_attr_boards.attr.skey.subkeys[ 6 ].__one_byte = 0; driver_attr_boards.attr.skey.subkeys[ 7 ].__one_byte = 0; driver_attr_boards.show = &(dgnc_driver_boards_show); driver_attr_boards.store = 0; driver_attr_maxboards.attr.name = "maxboards"; driver_attr_maxboards.attr.mode = 256; driver_attr_maxboards.attr.ignore_lockdep = 0; driver_attr_maxboards.attr.key = 0; driver_attr_maxboards.attr.skey.subkeys[ 0 ].__one_byte = 0; driver_attr_maxboards.attr.skey.subkeys[ 1 ].__one_byte = 0; driver_attr_maxboards.attr.skey.subkeys[ 2 ].__one_byte = 0; driver_attr_maxboards.attr.skey.subkeys[ 3 ].__one_byte = 0; driver_attr_maxboards.attr.skey.subkeys[ 4 ].__one_byte = 0; driver_attr_maxboards.attr.skey.subkeys[ 5 ].__one_byte = 0; driver_attr_maxboards.attr.skey.subkeys[ 6 ].__one_byte = 0; driver_attr_maxboards.attr.skey.subkeys[ 7 ].__one_byte = 0; driver_attr_maxboards.show = &(dgnc_driver_maxboards_show); driver_attr_maxboards.store = 0; driver_attr_debug.attr.name = "debug"; driver_attr_debug.attr.mode = 384; driver_attr_debug.attr.ignore_lockdep = 0; driver_attr_debug.attr.key = 0; driver_attr_debug.attr.skey.subkeys[ 0 ].__one_byte = 0; driver_attr_debug.attr.skey.subkeys[ 1 ].__one_byte = 0; driver_attr_debug.attr.skey.subkeys[ 2 ].__one_byte = 0; driver_attr_debug.attr.skey.subkeys[ 3 ].__one_byte = 0; driver_attr_debug.attr.skey.subkeys[ 4 ].__one_byte = 0; driver_attr_debug.attr.skey.subkeys[ 5 ].__one_byte = 0; driver_attr_debug.attr.skey.subkeys[ 6 ].__one_byte = 0; driver_attr_debug.attr.skey.subkeys[ 7 ].__one_byte = 0; driver_attr_debug.show = &(dgnc_driver_debug_show); driver_attr_debug.store = &(dgnc_driver_debug_store); driver_attr_rawreadok.attr.name = "rawreadok"; driver_attr_rawreadok.attr.mode = 384; driver_attr_rawreadok.attr.ignore_lockdep = 0; driver_attr_rawreadok.attr.key 
= 0; driver_attr_rawreadok.attr.skey.subkeys[ 0 ].__one_byte = 0; driver_attr_rawreadok.attr.skey.subkeys[ 1 ].__one_byte = 0; driver_attr_rawreadok.attr.skey.subkeys[ 2 ].__one_byte = 0; driver_attr_rawreadok.attr.skey.subkeys[ 3 ].__one_byte = 0; driver_attr_rawreadok.attr.skey.subkeys[ 4 ].__one_byte = 0; driver_attr_rawreadok.attr.skey.subkeys[ 5 ].__one_byte = 0; driver_attr_rawreadok.attr.skey.subkeys[ 6 ].__one_byte = 0; driver_attr_rawreadok.attr.skey.subkeys[ 7 ].__one_byte = 0; driver_attr_rawreadok.show = &(dgnc_driver_rawreadok_show); driver_attr_rawreadok.store = &(dgnc_driver_rawreadok_store); driver_attr_pollrate.attr.name = "pollrate"; driver_attr_pollrate.attr.mode = 384; driver_attr_pollrate.attr.ignore_lockdep = 0; driver_attr_pollrate.attr.key = 0; driver_attr_pollrate.attr.skey.subkeys[ 0 ].__one_byte = 0; driver_attr_pollrate.attr.skey.subkeys[ 1 ].__one_byte = 0; driver_attr_pollrate.attr.skey.subkeys[ 2 ].__one_byte = 0; driver_attr_pollrate.attr.skey.subkeys[ 3 ].__one_byte = 0; driver_attr_pollrate.attr.skey.subkeys[ 4 ].__one_byte = 0; driver_attr_pollrate.attr.skey.subkeys[ 5 ].__one_byte = 0; driver_attr_pollrate.attr.skey.subkeys[ 6 ].__one_byte = 0; driver_attr_pollrate.attr.skey.subkeys[ 7 ].__one_byte = 0; driver_attr_pollrate.show = &(dgnc_driver_pollrate_show); driver_attr_pollrate.store = &(dgnc_driver_pollrate_store); dev_attr_vpd.attr.name = "vpd"; dev_attr_vpd.attr.mode = 256; dev_attr_vpd.attr.ignore_lockdep = 0; dev_attr_vpd.attr.key = 0; dev_attr_vpd.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_vpd.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_vpd.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_vpd.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_vpd.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_vpd.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_vpd.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_vpd.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_vpd.show = &(dgnc_vpd_show); dev_attr_vpd.store = 0; 
dev_attr_serial_number.attr.name = "serial_number"; dev_attr_serial_number.attr.mode = 256; dev_attr_serial_number.attr.ignore_lockdep = 0; dev_attr_serial_number.attr.key = 0; dev_attr_serial_number.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_serial_number.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_serial_number.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_serial_number.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_serial_number.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_serial_number.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_serial_number.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_serial_number.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_serial_number.show = &(dgnc_serial_number_show); dev_attr_serial_number.store = 0; dev_attr_ports_state.attr.name = "ports_state"; dev_attr_ports_state.attr.mode = 256; dev_attr_ports_state.attr.ignore_lockdep = 0; dev_attr_ports_state.attr.key = 0; dev_attr_ports_state.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_ports_state.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_ports_state.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_ports_state.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_ports_state.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_ports_state.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_ports_state.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_ports_state.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_ports_state.show = &(dgnc_ports_state_show); dev_attr_ports_state.store = 0; dev_attr_ports_baud.attr.name = "ports_baud"; dev_attr_ports_baud.attr.mode = 256; dev_attr_ports_baud.attr.ignore_lockdep = 0; dev_attr_ports_baud.attr.key = 0; dev_attr_ports_baud.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_ports_baud.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_ports_baud.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_ports_baud.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_ports_baud.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_ports_baud.attr.skey.subkeys[ 5 
].__one_byte = 0; dev_attr_ports_baud.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_ports_baud.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_ports_baud.show = &(dgnc_ports_baud_show); dev_attr_ports_baud.store = 0; dev_attr_ports_msignals.attr.name = "ports_msignals"; dev_attr_ports_msignals.attr.mode = 256; dev_attr_ports_msignals.attr.ignore_lockdep = 0; dev_attr_ports_msignals.attr.key = 0; dev_attr_ports_msignals.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_ports_msignals.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_ports_msignals.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_ports_msignals.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_ports_msignals.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_ports_msignals.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_ports_msignals.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_ports_msignals.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_ports_msignals.show = &(dgnc_ports_msignals_show); dev_attr_ports_msignals.store = 0; dev_attr_ports_iflag.attr.name = "ports_iflag"; dev_attr_ports_iflag.attr.mode = 256; dev_attr_ports_iflag.attr.ignore_lockdep = 0; dev_attr_ports_iflag.attr.key = 0; dev_attr_ports_iflag.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_ports_iflag.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_ports_iflag.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_ports_iflag.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_ports_iflag.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_ports_iflag.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_ports_iflag.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_ports_iflag.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_ports_iflag.show = &(dgnc_ports_iflag_show); dev_attr_ports_iflag.store = 0; dev_attr_ports_cflag.attr.name = "ports_cflag"; dev_attr_ports_cflag.attr.mode = 256; dev_attr_ports_cflag.attr.ignore_lockdep = 0; dev_attr_ports_cflag.attr.key = 0; dev_attr_ports_cflag.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_ports_cflag.attr.skey.subkeys[ 
1 ].__one_byte = 0; dev_attr_ports_cflag.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_ports_cflag.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_ports_cflag.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_ports_cflag.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_ports_cflag.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_ports_cflag.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_ports_cflag.show = &(dgnc_ports_cflag_show); dev_attr_ports_cflag.store = 0; dev_attr_ports_oflag.attr.name = "ports_oflag"; dev_attr_ports_oflag.attr.mode = 256; dev_attr_ports_oflag.attr.ignore_lockdep = 0; dev_attr_ports_oflag.attr.key = 0; dev_attr_ports_oflag.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_ports_oflag.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_ports_oflag.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_ports_oflag.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_ports_oflag.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_ports_oflag.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_ports_oflag.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_ports_oflag.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_ports_oflag.show = &(dgnc_ports_oflag_show); dev_attr_ports_oflag.store = 0; dev_attr_ports_lflag.attr.name = "ports_lflag"; dev_attr_ports_lflag.attr.mode = 256; dev_attr_ports_lflag.attr.ignore_lockdep = 0; dev_attr_ports_lflag.attr.key = 0; dev_attr_ports_lflag.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_ports_lflag.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_ports_lflag.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_ports_lflag.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_ports_lflag.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_ports_lflag.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_ports_lflag.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_ports_lflag.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_ports_lflag.show = &(dgnc_ports_lflag_show); dev_attr_ports_lflag.store = 0; dev_attr_ports_digi_flag.attr.name = "ports_digi_flag"; 
dev_attr_ports_digi_flag.attr.mode = 256; dev_attr_ports_digi_flag.attr.ignore_lockdep = 0; dev_attr_ports_digi_flag.attr.key = 0; dev_attr_ports_digi_flag.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_ports_digi_flag.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_ports_digi_flag.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_ports_digi_flag.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_ports_digi_flag.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_ports_digi_flag.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_ports_digi_flag.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_ports_digi_flag.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_ports_digi_flag.show = &(dgnc_ports_digi_flag_show); dev_attr_ports_digi_flag.store = 0; dev_attr_ports_rxcount.attr.name = "ports_rxcount"; dev_attr_ports_rxcount.attr.mode = 256; dev_attr_ports_rxcount.attr.ignore_lockdep = 0; dev_attr_ports_rxcount.attr.key = 0; dev_attr_ports_rxcount.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_ports_rxcount.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_ports_rxcount.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_ports_rxcount.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_ports_rxcount.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_ports_rxcount.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_ports_rxcount.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_ports_rxcount.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_ports_rxcount.show = &(dgnc_ports_rxcount_show); dev_attr_ports_rxcount.store = 0; dev_attr_ports_txcount.attr.name = "ports_txcount"; dev_attr_ports_txcount.attr.mode = 256; dev_attr_ports_txcount.attr.ignore_lockdep = 0; dev_attr_ports_txcount.attr.key = 0; dev_attr_ports_txcount.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_ports_txcount.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_ports_txcount.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_ports_txcount.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_ports_txcount.attr.skey.subkeys[ 4 ].__one_byte = 0; 
dev_attr_ports_txcount.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_ports_txcount.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_ports_txcount.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_ports_txcount.show = &(dgnc_ports_txcount_show); dev_attr_ports_txcount.store = 0; dev_attr_state.attr.name = "state"; dev_attr_state.attr.mode = 256; dev_attr_state.attr.ignore_lockdep = 0; dev_attr_state.attr.key = 0; dev_attr_state.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_state.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_state.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_state.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_state.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_state.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_state.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_state.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_state.show = &(dgnc_tty_state_show); dev_attr_state.store = 0; dev_attr_baud.attr.name = "baud"; dev_attr_baud.attr.mode = 256; dev_attr_baud.attr.ignore_lockdep = 0; dev_attr_baud.attr.key = 0; dev_attr_baud.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_baud.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_baud.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_baud.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_baud.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_baud.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_baud.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_baud.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_baud.show = &(dgnc_tty_baud_show); dev_attr_baud.store = 0; dev_attr_msignals.attr.name = "msignals"; dev_attr_msignals.attr.mode = 256; dev_attr_msignals.attr.ignore_lockdep = 0; dev_attr_msignals.attr.key = 0; dev_attr_msignals.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_msignals.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_msignals.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_msignals.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_msignals.attr.skey.subkeys[ 4 ].__one_byte = 0; 
dev_attr_msignals.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_msignals.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_msignals.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_msignals.show = &(dgnc_tty_msignals_show); dev_attr_msignals.store = 0; dev_attr_iflag.attr.name = "iflag"; dev_attr_iflag.attr.mode = 256; dev_attr_iflag.attr.ignore_lockdep = 0; dev_attr_iflag.attr.key = 0; dev_attr_iflag.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_iflag.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_iflag.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_iflag.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_iflag.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_iflag.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_iflag.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_iflag.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_iflag.show = &(dgnc_tty_iflag_show); dev_attr_iflag.store = 0; dev_attr_cflag.attr.name = "cflag"; dev_attr_cflag.attr.mode = 256; dev_attr_cflag.attr.ignore_lockdep = 0; dev_attr_cflag.attr.key = 0; dev_attr_cflag.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_cflag.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_cflag.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_cflag.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_cflag.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_cflag.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_cflag.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_cflag.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_cflag.show = &(dgnc_tty_cflag_show); dev_attr_cflag.store = 0; dev_attr_oflag.attr.name = "oflag"; dev_attr_oflag.attr.mode = 256; dev_attr_oflag.attr.ignore_lockdep = 0; dev_attr_oflag.attr.key = 0; dev_attr_oflag.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_oflag.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_oflag.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_oflag.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_oflag.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_oflag.attr.skey.subkeys[ 5 ].__one_byte = 0; 
dev_attr_oflag.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_oflag.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_oflag.show = &(dgnc_tty_oflag_show); dev_attr_oflag.store = 0; dev_attr_lflag.attr.name = "lflag"; dev_attr_lflag.attr.mode = 256; dev_attr_lflag.attr.ignore_lockdep = 0; dev_attr_lflag.attr.key = 0; dev_attr_lflag.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_lflag.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_lflag.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_lflag.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_lflag.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_lflag.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_lflag.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_lflag.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_lflag.show = &(dgnc_tty_lflag_show); dev_attr_lflag.store = 0; dev_attr_digi_flag.attr.name = "digi_flag"; dev_attr_digi_flag.attr.mode = 256; dev_attr_digi_flag.attr.ignore_lockdep = 0; dev_attr_digi_flag.attr.key = 0; dev_attr_digi_flag.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_digi_flag.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_digi_flag.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_digi_flag.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_digi_flag.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_digi_flag.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_digi_flag.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_digi_flag.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_digi_flag.show = &(dgnc_tty_digi_flag_show); dev_attr_digi_flag.store = 0; dev_attr_rxcount.attr.name = "rxcount"; dev_attr_rxcount.attr.mode = 256; dev_attr_rxcount.attr.ignore_lockdep = 0; dev_attr_rxcount.attr.key = 0; dev_attr_rxcount.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_rxcount.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_rxcount.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_rxcount.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_rxcount.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_rxcount.attr.skey.subkeys[ 5 
].__one_byte = 0; dev_attr_rxcount.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_rxcount.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_rxcount.show = &(dgnc_tty_rxcount_show); dev_attr_rxcount.store = 0; dev_attr_txcount.attr.name = "txcount"; dev_attr_txcount.attr.mode = 256; dev_attr_txcount.attr.ignore_lockdep = 0; dev_attr_txcount.attr.key = 0; dev_attr_txcount.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_txcount.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_txcount.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_txcount.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_txcount.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_txcount.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_txcount.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_txcount.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_txcount.show = &(dgnc_tty_txcount_show); dev_attr_txcount.store = 0; dev_attr_custom_name.attr.name = "custom_name"; dev_attr_custom_name.attr.mode = 256; dev_attr_custom_name.attr.ignore_lockdep = 0; dev_attr_custom_name.attr.key = 0; dev_attr_custom_name.attr.skey.subkeys[ 0 ].__one_byte = 0; dev_attr_custom_name.attr.skey.subkeys[ 1 ].__one_byte = 0; dev_attr_custom_name.attr.skey.subkeys[ 2 ].__one_byte = 0; dev_attr_custom_name.attr.skey.subkeys[ 3 ].__one_byte = 0; dev_attr_custom_name.attr.skey.subkeys[ 4 ].__one_byte = 0; dev_attr_custom_name.attr.skey.subkeys[ 5 ].__one_byte = 0; dev_attr_custom_name.attr.skey.subkeys[ 6 ].__one_byte = 0; dev_attr_custom_name.attr.skey.subkeys[ 7 ].__one_byte = 0; dev_attr_custom_name.show = &(dgnc_tty_name_show); dev_attr_custom_name.store = 0; dgnc_sysfs_tty_entries[ 0 ] = &(&(dev_attr_state))->attr; dgnc_sysfs_tty_entries[ 1 ] = &(&(dev_attr_baud))->attr; dgnc_sysfs_tty_entries[ 2 ] = &(&(dev_attr_msignals))->attr; dgnc_sysfs_tty_entries[ 3 ] = &(&(dev_attr_iflag))->attr; dgnc_sysfs_tty_entries[ 4 ] = &(&(dev_attr_cflag))->attr; dgnc_sysfs_tty_entries[ 5 ] = &(&(dev_attr_oflag))->attr; dgnc_sysfs_tty_entries[ 6 ] = 
&(&(dev_attr_lflag))->attr; dgnc_sysfs_tty_entries[ 7 ] = &(&(dev_attr_digi_flag))->attr; dgnc_sysfs_tty_entries[ 8 ] = &(&(dev_attr_rxcount))->attr; dgnc_sysfs_tty_entries[ 9 ] = &(&(dev_attr_txcount))->attr; dgnc_sysfs_tty_entries[ 10 ] = &(&(dev_attr_custom_name))->attr; dgnc_sysfs_tty_entries[ 11 ] = 0; dgnc_tty_attribute_group.name = 0; dgnc_tty_attribute_group.is_visible = 0; dgnc_tty_attribute_group.attrs = &(dgnc_sysfs_tty_entries); dgnc_tty_attribute_group.bin_attrs = 0; return ; } -__BLAST_initialize_/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-3.18-rc1.tar.xz--X--106_1a/linux-3.18-rc1.tar.xz/csd_deg_dscv/2290/dscv_tempdir/dscv/rcv/106_1a/main-ldv_main1_sequence_infinite_withcheck_stateful/preprocess/1-cpp/drivers/staging/dgnc/dgnc_utils.o.i() { return ; } -__BLAST_initialize_/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-3.18-rc1.tar.xz--X--106_1a/linux-3.18-rc1.tar.xz/csd_deg_dscv/2290/dscv_tempdir/dscv/rcv/106_1a/main-ldv_main1_sequence_infinite_withcheck_stateful/preprocess/1-cpp//home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-3.18-rc1.tar.xz--X--106_1a/linux-3.18-rc1.tar.xz/csd_deg_dscv/2290/dscv_tempdir/rule-instrumentor/106_1a/common-model/ldv_common_model.i() { 847 ldv_initialize() { /* Function call is skipped due to function is undefined */} 853 ldv_handler_precall() { /* Function call is skipped due to function is undefined */} { } 228 rc = 0; 230 printk("dgnc: ") { /* Function call is skipped due to function is undefined */} 230 printk("%s, Digi International Part Number %s\n", "dgnc-1.3-16", "40002369_F") { /* Function call is skipped due to function is undefined */} { 271 rc = 0; { 765 -i = 0; dgnc_rawreadok = rawreadok; dgnc_trcbuf_size = trcbuf_size; dgnc_debug = debug; dgnc_NumBoards = 0; i = 0; 775 init_timer_key(&(dgnc_poll_timer), 0, "(&dgnc_poll_timer)", &(__key)) { /* Function call is skipped due to function is undefined */} 763 return ; } 277 printk("dgnc: ") { /* Function 
call is skipped due to function is undefined */} 277 printk("For the tools package or updated drivers please visit http://www.digi.com\n") { /* Function call is skipped due to function is undefined */} { 2166 tmp = __register_chrdev(major, 0, 256, name, fops) { /* Function call is skipped due to function is undefined */} } 291 dgnc_Major = rc; { 43 is_got = ldv_undef_ptr() { /* Function call is skipped due to function is undefined */} 53 ldv_usb_gadget_class = 1; } 293 dgnc_class = tmp; 294 device_create(dgnc_class, 0, dgnc_Major << 20, 0, "dgnc_mgmt") { /* Function call is skipped due to function is undefined */} { { 429 tmp___2 = __kmalloc(size, flags) { /* Function call is skipped due to function is undefined */} } 200 dgnc_TmpWriteBuf = tmp; 203 __retres2 = -12; } 304 printk("dgnc: ") { /* Function call is skipped due to function is undefined */} 304 printk("tty preinit - not enough memory (%d)\n", rc) { /* Function call is skipped due to function is undefined */} 305 __retres8 = rc; } 238 __retres2 = rc; } | Source code 1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8
9 // Provide model function prototypes before their usage.
10
11 void *ldv_create_class(void);
12 int ldv_register_class(void);
13 void ldv_unregister_class(void);
14
15 int ldv_register_chrdev_region(void);
16 void ldv_unregister_chrdev_region(void);
17
18 int ldv_register_usb_gadget(void);
19 void ldv_unregister_usb_gadget(void);
20
21 /*
22 * Copyright 2003 Digi International (www.digi.com)
23 * Scott H Kilau <Scott_Kilau at digi dot com>
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2, or (at your option)
28 * any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
32 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
33 * PURPOSE. See the GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
38 *
39 *
40 * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
41 *
42 * This is shared code between Digi's CVS archive and the
43 * Linux Kernel sources.
44 * Changing the source just for reformatting needlessly breaks
45 * our CVS diff history.
46 *
47 * Send any bug fixes/changes to: Eng.Linux at digi dot com.
48 * Thank you.
49 *
50 */
51
52
53 #include <linux/kernel.h>
54 #include <linux/module.h>
55 #include <linux/pci.h>
56 #include <linux/slab.h>
57 #include <linux/sched.h>
58 #include "dgnc_driver.h"
59 #include "dgnc_pci.h"
60 #include "dpacompat.h"
61 #include "dgnc_mgmt.h"
62 #include "dgnc_tty.h"
63 #include "dgnc_cls.h"
64 #include "dgnc_neo.h"
65 #include "dgnc_sysfs.h"
66
67 MODULE_LICENSE("GPL");
68 MODULE_AUTHOR("Digi International, http://www.digi.com");
69 MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");
70 MODULE_SUPPORTED_DEVICE("dgnc");
71
72 /*
73 * insmod command line overrideable parameters
74 *
75 * NOTE: we use a set of macros to create the variables, which allows
76 * us to specify the variable type, name, initial value, and description.
77 */
78 PARM_INT(debug, 0x00, 0644, "Driver debugging level");
79 PARM_INT(rawreadok, 1, 0644, "Bypass flip buffers on input");
80 PARM_INT(trcbuf_size, 0x100000, 0644, "Debugging trace buffer size.");
81
82 /**************************************************************************
83 *
84 * protos for this file
85 *
86 */
87 static int dgnc_start(void);
88 static int dgnc_finalize_board_init(struct dgnc_board *brd);
89 static void dgnc_init_globals(void);
90 static int dgnc_found_board(struct pci_dev *pdev, int id);
91 static void dgnc_cleanup_board(struct dgnc_board *brd);
92 static void dgnc_poll_handler(ulong dummy);
93 static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
94 static void dgnc_do_remap(struct dgnc_board *brd);
95
/*
 * File operations permitted on Control/Management major.
 * Only ioctl/open/release are wired up; every other fop is left NULL
 * (so e.g. read/write on the mgmt node fail with the kernel defaults).
 */
static const struct file_operations dgnc_BoardFops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= dgnc_mgmt_ioctl,	/* management/dpa ioctl entry point */
	.open		= dgnc_mgmt_open,
	.release	= dgnc_mgmt_close
};
105
106
/*
 * Globals
 */
uint			dgnc_NumBoards;			/* boards successfully probed so far */
struct dgnc_board	*dgnc_Board[MAXBOARDS];		/* board table, indexed by boardnum */
DEFINE_SPINLOCK(dgnc_global_lock);			/* guards the boot msgbuf print/free handoff */
uint			dgnc_Major;			/* chrdev major assigned for the mgmt device */
int			dgnc_poll_tick = 20;		/* Poll interval - 20 ms */

/*
 * Static vars.
 */
static struct class	*dgnc_class;			/* "dgnc_mgmt" device class */

/*
 * Poller stuff
 */
static DEFINE_SPINLOCK(dgnc_poll_lock);			/* Poll scheduling lock */
static ulong		dgnc_poll_time;			/* Time of next poll */
static uint		dgnc_poll_stop;			/* Used to tell poller to stop */
static struct timer_list dgnc_poll_timer;		/* one-shot timer, re-armed by the handler */
128
129
/*
 * PCI device IDs this driver binds to.  The last initializer field
 * (driver_data) is the index into dgnc_Ids[] for that board type.
 * NOTE(review): only the Classic DIDs appear here although
 * dgnc_found_board() also handles Neo DIDs -- confirm that is intended.
 */
static struct pci_device_id dgnc_pci_tbl[] = {
	{ DIGI_VID, PCI_DEVICE_CLASSIC_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ DIGI_VID, PCI_DEVICE_CLASSIC_4_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ DIGI_VID, PCI_DEVICE_CLASSIC_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ DIGI_VID, PCI_DEVICE_CLASSIC_8_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
	{0,}						/* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, dgnc_pci_tbl);
138
/* Per-board-type attributes, indexed by pci_device_id.driver_data. */
struct board_id {
	unsigned char *name;		/* human-readable product name */
	uint maxports;			/* number of serial ports on the board */
	unsigned int is_pci_express;	/* nonzero for PCI Express variants */
};

/* Keep the first entries in the same order as dgnc_pci_tbl[] above. */
static struct board_id dgnc_Ids[] = {
	{ PCI_DEVICE_CLASSIC_4_PCI_NAME,	4,	0 },
	{ PCI_DEVICE_CLASSIC_4_422_PCI_NAME,	4,	0 },
	{ PCI_DEVICE_CLASSIC_8_PCI_NAME,	8,	0 },
	{ PCI_DEVICE_CLASSIC_8_422_PCI_NAME,	8,	0 },
	{ PCI_DEVICE_NEO_4_PCI_NAME,		4,	0 },
	{ PCI_DEVICE_NEO_8_PCI_NAME,		8,	0 },
	{ PCI_DEVICE_NEO_2DB9_PCI_NAME,		2,	0 },
	{ PCI_DEVICE_NEO_2DB9PRI_PCI_NAME,	2,	0 },
	{ PCI_DEVICE_NEO_2RJ45_PCI_NAME,	2,	0 },
	{ PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME,	2,	0 },
	{ PCI_DEVICE_NEO_1_422_PCI_NAME,	1,	0 },
	{ PCI_DEVICE_NEO_1_422_485_PCI_NAME,	1,	0 },
	{ PCI_DEVICE_NEO_2_422_485_PCI_NAME,	2,	0 },
	{ PCI_DEVICE_NEO_EXPRESS_8_PCI_NAME,	8,	1 },
	{ PCI_DEVICE_NEO_EXPRESS_4_PCI_NAME,	4,	1 },
	{ PCI_DEVICE_NEO_EXPRESS_4RJ45_PCI_NAME, 4,	1 },
	{ PCI_DEVICE_NEO_EXPRESS_8RJ45_PCI_NAME, 8,	1 },
	{ NULL, 0, 0 }			/* terminator */
};
165
/*
 * PCI driver hooks.
 * NOTE(review): no .remove callback is set -- boards are only torn down
 * at module unload via dgnc_cleanup_module(); confirm hot-unplug is out
 * of scope for this hardware.
 */
static struct pci_driver dgnc_driver = {
	.name		= "dgnc",
	.probe		= dgnc_init_one,
	.id_table	= dgnc_pci_tbl,
};


/* Text forms of the board states, indexed by the BOARD_* state values. */
char *dgnc_state_text[] = {
	"Board Failed",
	"Board Found",
	"Board READY",
};
178
179
180 /************************************************************************
181 *
182 * Driver load/unload functions
183 *
184 ************************************************************************/
185
/*
 * dgnc_cleanup_module()
 *
 * Module unload. This is where it all ends.
 *
 * Teardown order matters: stop the poller first (so no new tasklets get
 * scheduled), then remove sysfs/device nodes, then free each board.
 */
static void dgnc_cleanup_module(void)
{
	int i;
	unsigned long flags;

	/* Tell the poll handler not to re-arm itself. */
	spin_lock_irqsave(&dgnc_poll_lock, flags);
	dgnc_poll_stop = 1;
	spin_unlock_irqrestore(&dgnc_poll_lock, flags);

	/* Turn off poller right away. */
	del_timer_sync(&dgnc_poll_timer);

	dgnc_remove_driver_sysfiles(&dgnc_driver);

	/* Remove the management device node, its class, and the chrdev. */
	device_destroy(dgnc_class, MKDEV(dgnc_Major, 0));
	class_destroy(dgnc_class);
	unregister_chrdev(dgnc_Major, "dgnc");

	/* Per-board teardown: sysfs entries, tty layer, then board memory. */
	for (i = 0; i < dgnc_NumBoards; ++i) {
		dgnc_remove_ports_sysfiles(dgnc_Board[i]);
		dgnc_tty_uninit(dgnc_Board[i]);
		dgnc_cleanup_board(dgnc_Board[i]);
	}

	dgnc_tty_post_uninit();

	/* Only unregister if the PCI driver was actually registered
	 * (init only registers it when at least one board was found). */
	if (dgnc_NumBoards)
		pci_unregister_driver(&dgnc_driver);
}
220
221 /*
222 * init_module()
223 *
224 * Module load. This is where it all starts.
225 */
226 static int __init dgnc_init_module(void)
227 {
228 int rc = 0;
229
230 APR(("%s, Digi International Part Number %s\n", DG_NAME, DG_PART));
231
232 /*
233 * Initialize global stuff
234 */
235 rc = dgnc_start();
236
237 if (rc < 0)
238 return rc;
239
240 /*
241 * Find and configure all the cards
242 */
243 rc = pci_register_driver(&dgnc_driver);
244
245 /*
246 * If something went wrong in the scan, bail out of driver.
247 */
248 if (rc < 0) {
249 /* Only unregister the pci driver if it was actually registered. */
250 if (dgnc_NumBoards)
251 pci_unregister_driver(&dgnc_driver);
252 else
253 pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
254
255 dgnc_cleanup_module();
256 } else {
257 dgnc_create_driver_sysfiles(&dgnc_driver);
258 }
259
260 return rc;
261 }
262
263 module_init(dgnc_init_module);
264 module_exit(dgnc_cleanup_module);
265
266 /*
267 * Start of driver.
268 */
269 static int dgnc_start(void)
270 {
271 int rc = 0;
272 unsigned long flags;
273
274 /* make sure that the globals are init'd before we do anything else */
275 dgnc_init_globals();
276
277 APR(("For the tools package or updated drivers please visit http://www.digi.com\n"));
278
279 /*
280 * Register our base character device into the kernel.
281 * This allows the download daemon to connect to the downld device
282 * before any of the boards are init'ed.
283 *
284 * Register management/dpa devices
285 */
286 rc = register_chrdev(0, "dgnc", &dgnc_BoardFops);
287 if (rc <= 0) {
288 APR(("Can't register dgnc driver device (%d)\n", rc));
289 return -ENXIO;
290 }
291 dgnc_Major = rc;
292
293 dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
294 device_create(dgnc_class, NULL,
295 MKDEV(dgnc_Major, 0),
296 NULL, "dgnc_mgmt");
297
298 /*
299 * Init any global tty stuff.
300 */
301 rc = dgnc_tty_preinit();
302
303 if (rc < 0) {
304 APR(("tty preinit - not enough memory (%d)\n", rc));
305 return rc;
306 }
307
308 /* Start the poller */
309 spin_lock_irqsave(&dgnc_poll_lock, flags);
310 init_timer(&dgnc_poll_timer);
311 dgnc_poll_timer.function = dgnc_poll_handler;
312 dgnc_poll_timer.data = 0;
313 dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
314 dgnc_poll_timer.expires = dgnc_poll_time;
315 spin_unlock_irqrestore(&dgnc_poll_lock, flags);
316
317 add_timer(&dgnc_poll_timer);
318
319 return rc;
320 }
321
322 /* returns count (>= 0), or negative on error */
323 static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
324 {
325 int rc;
326
327 /* wake up and enable device */
328 rc = pci_enable_device(pdev);
329
330 if (rc < 0) {
331 rc = -EIO;
332 } else {
333 rc = dgnc_found_board(pdev, ent->driver_data);
334 if (rc == 0)
335 dgnc_NumBoards++;
336 }
337 return rc;
338 }
339
/*
 * dgnc_cleanup_board()
 *
 * Free all the memory associated with a board
 */
static void dgnc_cleanup_board(struct dgnc_board *brd)
{
	int i = 0;

	/* Validate the magic number before touching anything. */
	if (!brd || brd->magic != DGNC_BOARD_MAGIC)
		return;

	switch (brd->device) {
	case PCI_DEVICE_CLASSIC_4_DID:
	case PCI_DEVICE_CLASSIC_8_DID:
	case PCI_DEVICE_CLASSIC_4_422_DID:
	case PCI_DEVICE_CLASSIC_8_422_DID:

		/* Tell card not to interrupt anymore. */
		outb(0, brd->iobase + 0x4c);
		break;

	default:
		/* Neo boards need no explicit interrupt disable here. */
		break;
	}

	if (brd->irq)
		free_irq(brd->irq, brd);

	/* Make sure the board's poll helper tasklet has finished. */
	tasklet_kill(&brd->helper_tasklet);

	/* Unmap the UART register window, if it was mapped. */
	if (brd->re_map_membase) {
		iounmap(brd->re_map_membase);
		brd->re_map_membase = NULL;
	}

	/* Flush and free the temporary boot-message buffer, if still present. */
	if (brd->msgbuf_head) {
		unsigned long flags;

		spin_lock_irqsave(&dgnc_global_lock, flags);
		brd->msgbuf = NULL;
		printk("%s", brd->msgbuf_head);
		kfree(brd->msgbuf_head);
		brd->msgbuf_head = NULL;
		spin_unlock_irqrestore(&dgnc_global_lock, flags);
	}

	/* Free all allocated channels structs */
	for (i = 0; i < MAXPORTS ; i++) {
		if (brd->channels[i]) {
			kfree(brd->channels[i]->ch_rqueue);
			kfree(brd->channels[i]->ch_equeue);
			kfree(brd->channels[i]->ch_wqueue);
			kfree(brd->channels[i]);
			brd->channels[i] = NULL;
		}
	}

	kfree(brd->flipbuf);

	/* Clear the global slot so nothing dereferences the freed board. */
	dgnc_Board[brd->boardnum] = NULL;

	kfree(brd);
}
404
405
406 /*
407 * dgnc_found_board()
408 *
409 * A board has been found, init it.
410 */
411 static int dgnc_found_board(struct pci_dev *pdev, int id)
412 {
413 struct dgnc_board *brd;
414 unsigned int pci_irq;
415 int i = 0;
416 int rc = 0;
417 unsigned long flags;
418
419 /* get the board structure and prep it */
420 dgnc_Board[dgnc_NumBoards] = kzalloc(sizeof(*brd), GFP_KERNEL);
421 brd = dgnc_Board[dgnc_NumBoards];
422
423 if (!brd)
424 return -ENOMEM;
425
426 /* make a temporary message buffer for the boot messages */
427 brd->msgbuf_head = kzalloc(sizeof(u8) * 8192, GFP_KERNEL);
428 brd->msgbuf = brd->msgbuf_head;
429
430 if (!brd->msgbuf) {
431 kfree(brd);
432 return -ENOMEM;
433 }
434
435 /* store the info for the board we've found */
436 brd->magic = DGNC_BOARD_MAGIC;
437 brd->boardnum = dgnc_NumBoards;
438 brd->vendor = dgnc_pci_tbl[id].vendor;
439 brd->device = dgnc_pci_tbl[id].device;
440 brd->pdev = pdev;
441 brd->pci_bus = pdev->bus->number;
442 brd->pci_slot = PCI_SLOT(pdev->devfn);
443 brd->name = dgnc_Ids[id].name;
444 brd->maxports = dgnc_Ids[id].maxports;
445 if (dgnc_Ids[i].is_pci_express)
446 brd->bd_flags |= BD_IS_PCI_EXPRESS;
447 brd->dpastatus = BD_NOFEP;
448 init_waitqueue_head(&brd->state_wait);
449
450 spin_lock_init(&brd->bd_lock);
451 spin_lock_init(&brd->bd_intr_lock);
452
453 brd->state = BOARD_FOUND;
454
455 for (i = 0; i < MAXPORTS; i++)
456 brd->channels[i] = NULL;
457
458 /* store which card & revision we have */
459 pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
460 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
461 pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
462
463 pci_irq = pdev->irq;
464 brd->irq = pci_irq;
465
466
467 switch (brd->device) {
468
469 case PCI_DEVICE_CLASSIC_4_DID:
470 case PCI_DEVICE_CLASSIC_8_DID:
471 case PCI_DEVICE_CLASSIC_4_422_DID:
472 case PCI_DEVICE_CLASSIC_8_422_DID:
473
474 brd->dpatype = T_CLASSIC | T_PCIBUS;
475
476 /*
477 * For PCI ClassicBoards
478 * PCI Local Address (i.e. "resource" number) space
479 * 0 PLX Memory Mapped Config
480 * 1 PLX I/O Mapped Config
481 * 2 I/O Mapped UARTs and Status
482 * 3 Memory Mapped VPD
483 * 4 Memory Mapped UARTs and Status
484 */
485
486
487 /* get the PCI Base Address Registers */
488 brd->membase = pci_resource_start(pdev, 4);
489
490 if (!brd->membase) {
491 APR(("card has no PCI IO resources, failing board.\n"));
492 return -ENODEV;
493 }
494
495 brd->membase_end = pci_resource_end(pdev, 4);
496
497 if (brd->membase & 1)
498 brd->membase &= ~3;
499 else
500 brd->membase &= ~15;
501
502 brd->iobase = pci_resource_start(pdev, 1);
503 brd->iobase_end = pci_resource_end(pdev, 1);
504 brd->iobase = ((unsigned int) (brd->iobase)) & 0xFFFE;
505
506 /* Assign the board_ops struct */
507 brd->bd_ops = &dgnc_cls_ops;
508
509 brd->bd_uart_offset = 0x8;
510 brd->bd_dividend = 921600;
511
512 dgnc_do_remap(brd);
513
514 /* Get and store the board VPD, if it exists */
515 brd->bd_ops->vpd(brd);
516
517 /*
518 * Enable Local Interrupt 1 (0x1),
519 * Local Interrupt 1 Polarity Active high (0x2),
520 * Enable PCI interrupt (0x40)
521 */
522 outb(0x43, brd->iobase + 0x4c);
523
524 break;
525
526
527 case PCI_DEVICE_NEO_4_DID:
528 case PCI_DEVICE_NEO_8_DID:
529 case PCI_DEVICE_NEO_2DB9_DID:
530 case PCI_DEVICE_NEO_2DB9PRI_DID:
531 case PCI_DEVICE_NEO_2RJ45_DID:
532 case PCI_DEVICE_NEO_2RJ45PRI_DID:
533 case PCI_DEVICE_NEO_1_422_DID:
534 case PCI_DEVICE_NEO_1_422_485_DID:
535 case PCI_DEVICE_NEO_2_422_485_DID:
536 case PCI_DEVICE_NEO_EXPRESS_8_DID:
537 case PCI_DEVICE_NEO_EXPRESS_4_DID:
538 case PCI_DEVICE_NEO_EXPRESS_4RJ45_DID:
539 case PCI_DEVICE_NEO_EXPRESS_8RJ45_DID:
540
541 /*
542 * This chip is set up 100% when we get to it.
543 * No need to enable global interrupts or anything.
544 */
545 if (brd->bd_flags & BD_IS_PCI_EXPRESS)
546 brd->dpatype = T_NEO_EXPRESS | T_PCIBUS;
547 else
548 brd->dpatype = T_NEO | T_PCIBUS;
549
550 /* get the PCI Base Address Registers */
551 brd->membase = pci_resource_start(pdev, 0);
552 brd->membase_end = pci_resource_end(pdev, 0);
553
554 if (brd->membase & 1)
555 brd->membase &= ~3;
556 else
557 brd->membase &= ~15;
558
559 /* Assign the board_ops struct */
560 brd->bd_ops = &dgnc_neo_ops;
561
562 brd->bd_uart_offset = 0x200;
563 brd->bd_dividend = 921600;
564
565 dgnc_do_remap(brd);
566
567 if (brd->re_map_membase) {
568
569 /* After remap is complete, we need to read and store the dvid */
570 brd->dvid = readb(brd->re_map_membase + 0x8D);
571
572 /* Get and store the board VPD, if it exists */
573 brd->bd_ops->vpd(brd);
574 }
575 break;
576
577 default:
578 APR(("Did not find any compatible Neo or Classic PCI boards in system.\n"));
579 return -ENXIO;
580
581 }
582
583 /*
584 * Do tty device initialization.
585 */
586
587 rc = dgnc_tty_register(brd);
588 if (rc < 0) {
589 dgnc_tty_uninit(brd);
590 APR(("Can't register tty devices (%d)\n", rc));
591 brd->state = BOARD_FAILED;
592 brd->dpastatus = BD_NOFEP;
593 goto failed;
594 }
595
596 rc = dgnc_finalize_board_init(brd);
597 if (rc < 0) {
598 APR(("Can't finalize board init (%d)\n", rc));
599 brd->state = BOARD_FAILED;
600 brd->dpastatus = BD_NOFEP;
601
602 goto failed;
603 }
604
605 rc = dgnc_tty_init(brd);
606 if (rc < 0) {
607 dgnc_tty_uninit(brd);
608 APR(("Can't init tty devices (%d)\n", rc));
609 brd->state = BOARD_FAILED;
610 brd->dpastatus = BD_NOFEP;
611
612 goto failed;
613 }
614
615 brd->state = BOARD_READY;
616 brd->dpastatus = BD_RUNNING;
617
618 dgnc_create_ports_sysfiles(brd);
619
620 /* init our poll helper tasklet */
621 tasklet_init(&brd->helper_tasklet, brd->bd_ops->tasklet, (unsigned long) brd);
622
623 spin_lock_irqsave(&dgnc_global_lock, flags);
624 brd->msgbuf = NULL;
625 printk("%s", brd->msgbuf_head);
626 kfree(brd->msgbuf_head);
627 brd->msgbuf_head = NULL;
628 spin_unlock_irqrestore(&dgnc_global_lock, flags);
629
630 /*
631 * allocate flip buffer for board.
632 *
633 * Okay to malloc with GFP_KERNEL, we are not at interrupt
634 * context, and there are no locks held.
635 */
636 brd->flipbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
637
638 wake_up_interruptible(&brd->state_wait);
639
640 return 0;
641
642 failed:
643
644 return -ENXIO;
645
646 }
647
648
649 static int dgnc_finalize_board_init(struct dgnc_board *brd)
650 {
651 int rc = 0;
652
653 if (!brd || brd->magic != DGNC_BOARD_MAGIC)
654 return -ENODEV;
655
656 if (brd->irq) {
657 rc = request_irq(brd->irq, brd->bd_ops->intr,
658 IRQF_SHARED, "DGNC", brd);
659
660 if (rc) {
661 dev_err(&brd->pdev->dev,
662 "Failed to hook IRQ %d\n", brd->irq);
663 brd->state = BOARD_FAILED;
664 brd->dpastatus = BD_NOFEP;
665 rc = -ENODEV;
666 }
667 }
668 return rc;
669 }
670
671 /*
672 * Remap PCI memory.
673 */
674 static void dgnc_do_remap(struct dgnc_board *brd)
675 {
676
677 if (!brd || brd->magic != DGNC_BOARD_MAGIC)
678 return;
679
680 brd->re_map_membase = ioremap(brd->membase, 0x1000);
681 }
682
683
/*****************************************************************************
*
* Function:
*
*	dgnc_poll_handler
*
* Author:
*
*	Scott H Kilau
*
* Parameters:
*
*	dummy -- ignored
*
* Return Values:
*
*	none
*
* Description:
*
*	As each timer expires, it determines (a) whether the "transmit"
*	waiter needs to be woken up, and (b) whether the poller needs to
*	be rescheduled.
*
******************************************************************************/

static void dgnc_poll_handler(ulong dummy)
{
	struct dgnc_board *brd;
	unsigned long flags;
	int i;
	unsigned long new_time;

	/* Go thru each board, kicking off a tasklet for each if needed */
	for (i = 0; i < dgnc_NumBoards; i++) {
		brd = dgnc_Board[i];

		spin_lock_irqsave(&brd->bd_lock, flags);

		/* If board is in a failed state, don't bother scheduling a tasklet */
		if (brd->state == BOARD_FAILED) {
			spin_unlock_irqrestore(&brd->bd_lock, flags);
			continue;
		}

		/* Schedule a poll helper task */
		tasklet_schedule(&brd->helper_tasklet);

		spin_unlock_irqrestore(&brd->bd_lock, flags);
	}

	/*
	 * Schedule ourself back at the nominal wakeup interval.
	 */
	spin_lock_irqsave(&dgnc_poll_lock, flags);
	dgnc_poll_time += dgnc_jiffies_from_ms(dgnc_poll_tick);

	new_time = dgnc_poll_time - jiffies;

	/*
	 * If we have drifted too far behind, resynchronize to
	 * "now + one interval" instead of trying to catch up.
	 * NOTE(review): new_time is a jiffies delta while dgnc_poll_tick
	 * is in milliseconds -- the comparison mixes units; confirm this
	 * threshold is intended.
	 */
	if ((ulong) new_time >= 2 * dgnc_poll_tick)
		dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);

	/*
	 * Re-initialize and re-arm the one-shot timer under the poll lock.
	 * NOTE(review): mod_timer() would normally be preferred over
	 * calling init_timer() again on an existing timer; kept as-is.
	 */
	init_timer(&dgnc_poll_timer);
	dgnc_poll_timer.function = dgnc_poll_handler;
	dgnc_poll_timer.data = 0;
	dgnc_poll_timer.expires = dgnc_poll_time;
	spin_unlock_irqrestore(&dgnc_poll_lock, flags);

	/* Don't re-arm if module unload has told the poller to stop. */
	if (!dgnc_poll_stop)
		add_timer(&dgnc_poll_timer);
}
755
756 /*
757 * dgnc_init_globals()
758 *
759 * This is where we initialize the globals from the static insmod
760 * configuration variables. These are declared near the head of
761 * this file.
762 */
763 static void dgnc_init_globals(void)
764 {
765 int i = 0;
766
767 dgnc_rawreadok = rawreadok;
768 dgnc_trcbuf_size = trcbuf_size;
769 dgnc_debug = debug;
770 dgnc_NumBoards = 0;
771
772 for (i = 0; i < MAXBOARDS; i++)
773 dgnc_Board[i] = NULL;
774
775 init_timer(&dgnc_poll_timer);
776 }
777
778
779
780
781
782
783 /* LDV_COMMENT_BEGIN_MAIN */
784 #ifdef LDV_MAIN1_sequence_infinite_withcheck_stateful
785
786 /*###########################################################################*/
787
788 /*############## Driver Environment Generator 0.2 output ####################*/
789
790 /*###########################################################################*/
791
792
793
794 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
795 void ldv_check_final_state(void);
796
797 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
798 void ldv_check_return_value(int res);
799
800 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
801 void ldv_check_return_value_probe(int res);
802
803 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
804 void ldv_initialize(void);
805
806 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
807 void ldv_handler_precall(void);
808
809 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
810 int nondet_int(void);
811
812 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
813 int LDV_IN_INTERRUPT;
814
/* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
/*
 * Auto-generated driver-environment harness: initializes the LDV model,
 * loads the module, then non-deterministically invokes the pci_driver
 * probe callback in an infinite loop before unloading the module and
 * checking that all modeled resources were released.  Do not hand-edit
 * the call sequence -- it mirrors the kernel's driver lifecycle.
 */
void ldv_main1_sequence_infinite_withcheck_stateful(void) {



	/* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
	/*============================= VARIABLE DECLARATION PART =============================*/
	/** STRUCT: struct type: pci_driver, struct name: dgnc_driver **/
	/* content: static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_init_one" */
	struct pci_dev * var_group1;
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_init_one" */
	const struct pci_device_id * var_dgnc_init_one_3_p1;
	/* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "dgnc_init_one" */
	static int res_dgnc_init_one_3;




	/* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
	/* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
	/*============================= VARIABLE INITIALIZING PART =============================*/
	LDV_IN_INTERRUPT=1;




	/* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
	/* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
	/*============================= FUNCTION CALL SECTION =============================*/
	/* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
	ldv_initialize();

	/** INIT: init_type: ST_MODULE_INIT **/
	/* content: static int __init dgnc_init_module(void)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". */
	ldv_handler_precall();
	if(dgnc_init_module())
		goto ldv_final;
	int ldv_s_dgnc_driver_pci_driver = 0;

	/* Loop forever (nondeterministically), modeling arbitrary callback order. */
	while( nondet_int()
		|| !(ldv_s_dgnc_driver_pci_driver == 0)
	) {

		switch(nondet_int()) {

		case 0: {

			/** STRUCT: struct type: pci_driver, struct name: dgnc_driver **/
			if(ldv_s_dgnc_driver_pci_driver==0) {

				/* content: static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)*/
				/* LDV_COMMENT_END_PREP */
				/* LDV_COMMENT_FUNCTION_CALL Function from field "probe" from driver structure with callbacks "dgnc_driver". Standart function test for correct return result. */
				res_dgnc_init_one_3 = dgnc_init_one( var_group1, var_dgnc_init_one_3_p1);
				ldv_check_return_value(res_dgnc_init_one_3);
				ldv_check_return_value_probe(res_dgnc_init_one_3);
				if(res_dgnc_init_one_3)
					goto ldv_module_exit;
				ldv_s_dgnc_driver_pci_driver=0;

			}

		}

		break;
		default: break;

		}

	}

	ldv_module_exit:

	/** INIT: init_type: ST_MODULE_EXIT **/
	/* content: static void dgnc_cleanup_module(void)*/
	/* LDV_COMMENT_END_PREP */
	/* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be uploaded from kernel. This function declared as "MODULE_EXIT(function name)". */
	ldv_handler_precall();
	dgnc_cleanup_module();

	/* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
	ldv_final: ldv_check_final_state();

	/* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
	return;

}
907 #endif
908
909 /* LDV_COMMENT_END_MAIN */ 1
2 #include <linux/kernel.h>
3 bool ldv_is_err(const void *ptr);
4 bool ldv_is_err_or_null(const void *ptr);
5 void* ldv_err_ptr(long error);
6 long ldv_ptr_err(const void *ptr);
7
8
9 // Provide model function prototypes before their usage.
10
11 void *ldv_create_class(void);
12 int ldv_register_class(void);
13 void ldv_unregister_class(void);
14
15 int ldv_register_chrdev_region(void);
16 void ldv_unregister_chrdev_region(void);
17
18 int ldv_register_usb_gadget(void);
19 void ldv_unregister_usb_gadget(void);
20
21 /*
22 * Copyright 2003 Digi International (www.digi.com)
23 * Scott H Kilau <Scott_Kilau at digi dot com>
24 *
25 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by
27 * the Free Software Foundation; either version 2, or (at your option)
28 * any later version.
29 *
30 * This program is distributed in the hope that it will be useful,
31 * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
32 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
33 * PURPOSE. See the GNU General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software
37 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
38 *
39 *
40 * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
41 *
42 * This is shared code between Digi's CVS archive and the
43 * Linux Kernel sources.
44 * Changing the source just for reformatting needlessly breaks
45 * our CVS diff history.
46 *
47 * Send any bug fixes/changes to: Eng.Linux at digi dot com.
48 * Thank you.
49 */
50
51 /************************************************************************
52 *
53 * This file implements the tty driver functionality for the
54 * Neo and ClassicBoard PCI based product lines.
55 *
56 ************************************************************************
57 *
58 */
59
60 #include <linux/kernel.h>
61 #include <linux/sched.h> /* For jiffies, task states */
62 #include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
63 #include <linux/module.h>
64 #include <linux/ctype.h>
65 #include <linux/tty.h>
66 #include <linux/tty_flip.h>
67 #include <linux/serial_reg.h>
68 #include <linux/slab.h>
69 #include <linux/delay.h> /* For udelay */
70 #include <linux/uaccess.h> /* For copy_from_user/copy_to_user */
71 #include <linux/pci.h>
72
73 #include "dgnc_driver.h"
74 #include "dgnc_tty.h"
75 #include "dgnc_types.h"
76 #include "dgnc_neo.h"
77 #include "dgnc_cls.h"
78 #include "dpacompat.h"
79 #include "dgnc_sysfs.h"
80 #include "dgnc_utils.h"
81
/* Compatibility wrappers mapping the legacy mutex names onto the current
 * semaphore API. */
#define init_MUTEX(sem) sema_init(sem, 1)
#define DECLARE_MUTEX(name) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)

/*
 * internal variables
 */
static struct dgnc_board *dgnc_BoardsByMajor[256];	/* tty major number -> owning board (set in dgnc_tty_register) */
static unsigned char *dgnc_TmpWriteBuf;			/* single bounce buffer for user-space copies; see dgnc_tty_preinit() */
static DECLARE_MUTEX(dgnc_TmpWriteSem);			/* serializes access to dgnc_TmpWriteBuf */
92
93 /*
94 * Default transparent print information.
95 */
/* Per-channel defaults: copied into each channel's ch_digi in dgnc_tty_init(). */
static struct digi_t dgnc_digi_init = {
	.digi_flags =	DIGI_COOK,	/* Flags			*/
	.digi_maxcps =	100,		/* Max CPS			*/
	.digi_maxchar =	50,		/* Max chars in print queue	*/
	.digi_bufsize =	100,		/* Printer buffer size		*/
	.digi_onlen =	4,		/* size of printer on string	*/
	.digi_offlen =	4,		/* size of printer off string	*/
	.digi_onstr =	"\033[5i",	/* ANSI printer on string ]	*/
	.digi_offstr =	"\033[4i",	/* ANSI printer off string ]	*/
	.digi_term =	"ansi"		/* default terminal type	*/
};
107
108
109 /*
110 * Define a local default termios struct. All ports will be created
111 * with this termios initially.
112 *
113 * This defines a raw port at 9600 baud, 8 data bits, no parity,
114 * 1 stop bit.
115 */
static struct ktermios DgncDefaultTermios = {
	.c_iflag =	(DEFAULT_IFLAGS),	/* iflags */
	.c_oflag =	(DEFAULT_OFLAGS),	/* oflags */
	.c_cflag =	(DEFAULT_CFLAGS),	/* cflags */
	.c_lflag =	(DEFAULT_LFLAGS),	/* lflags */
	.c_cc =		INIT_C_CC,		/* default control characters */
	.c_line =	0,			/* line discipline 0 */
};
124
125
126 /* Our function prototypes */
127 static int dgnc_tty_open(struct tty_struct *tty, struct file *file);
128 static void dgnc_tty_close(struct tty_struct *tty, struct file *file);
129 static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch);
130 static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
131 static int dgnc_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo);
132 static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info);
133 static int dgnc_tty_write_room(struct tty_struct *tty);
134 static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c);
135 static int dgnc_tty_chars_in_buffer(struct tty_struct *tty);
136 static void dgnc_tty_start(struct tty_struct *tty);
137 static void dgnc_tty_stop(struct tty_struct *tty);
138 static void dgnc_tty_throttle(struct tty_struct *tty);
139 static void dgnc_tty_unthrottle(struct tty_struct *tty);
140 static void dgnc_tty_flush_chars(struct tty_struct *tty);
141 static void dgnc_tty_flush_buffer(struct tty_struct *tty);
142 static void dgnc_tty_hangup(struct tty_struct *tty);
143 static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value);
144 static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value);
145 static int dgnc_tty_tiocmget(struct tty_struct *tty);
146 static int dgnc_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear);
147 static int dgnc_tty_send_break(struct tty_struct *tty, int msec);
148 static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout);
149 static int dgnc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count);
150 static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios);
151 static void dgnc_tty_send_xchar(struct tty_struct *tty, char ch);
152
153
/*
 * Entry points the tty core uses to drive our ports.  Installed on both
 * the serial and the transparent-print drivers via tty_set_operations()
 * in dgnc_tty_register().
 */
static const struct tty_operations dgnc_tty_ops = {
	.open = dgnc_tty_open,
	.close = dgnc_tty_close,
	.write = dgnc_tty_write,
	.write_room = dgnc_tty_write_room,
	.flush_buffer = dgnc_tty_flush_buffer,
	.chars_in_buffer = dgnc_tty_chars_in_buffer,
	.flush_chars = dgnc_tty_flush_chars,
	.ioctl = dgnc_tty_ioctl,
	.set_termios = dgnc_tty_set_termios,
	.stop = dgnc_tty_stop,
	.start = dgnc_tty_start,
	.throttle = dgnc_tty_throttle,
	.unthrottle = dgnc_tty_unthrottle,
	.hangup = dgnc_tty_hangup,
	.put_char = dgnc_tty_put_char,
	.tiocmget = dgnc_tty_tiocmget,
	.tiocmset = dgnc_tty_tiocmset,
	.break_ctl = dgnc_tty_send_break,
	.wait_until_sent = dgnc_tty_wait_until_sent,
	.send_xchar = dgnc_tty_send_xchar
};
176
177 /************************************************************************
178 *
179 * TTY Initialization/Cleanup Functions
180 *
181 ************************************************************************/
182
183 /*
184 * dgnc_tty_preinit()
185 *
186 * Initialize any global tty related data before we download any boards.
187 */
188 int dgnc_tty_preinit(void)
189 {
190 /*
191 * Allocate a buffer for doing the copy from user space to
192 * kernel space in dgnc_write(). We only use one buffer and
193 * control access to it with a semaphore. If we are paging, we
194 * are already in trouble so one buffer won't hurt much anyway.
195 *
196 * We are okay to sleep in the malloc, as this routine
197 * is only called during module load, (not in interrupt context),
198 * and with no locks held.
199 */
200 dgnc_TmpWriteBuf = kmalloc(WRITEBUFLEN, GFP_KERNEL);
201
202 if (!dgnc_TmpWriteBuf)
203 return -ENOMEM;
204
205 return 0;
206 }
207
208
209 /*
210 * dgnc_tty_register()
211 *
212 * Init the tty subsystem for this board.
213 */
214 int dgnc_tty_register(struct dgnc_board *brd)
215 {
216 int rc = 0;
217
218 brd->SerialDriver.magic = TTY_DRIVER_MAGIC;
219
220 snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgnc_%d_", brd->boardnum);
221
222 brd->SerialDriver.name = brd->SerialName;
223 brd->SerialDriver.name_base = 0;
224 brd->SerialDriver.major = 0;
225 brd->SerialDriver.minor_start = 0;
226 brd->SerialDriver.num = brd->maxports;
227 brd->SerialDriver.type = TTY_DRIVER_TYPE_SERIAL;
228 brd->SerialDriver.subtype = SERIAL_TYPE_NORMAL;
229 brd->SerialDriver.init_termios = DgncDefaultTermios;
230 brd->SerialDriver.driver_name = DRVSTR;
231 brd->SerialDriver.flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
232
233 /*
234 * The kernel wants space to store pointers to
235 * tty_struct's and termios's.
236 */
237 brd->SerialDriver.ttys = kcalloc(brd->maxports, sizeof(*brd->SerialDriver.ttys), GFP_KERNEL);
238 if (!brd->SerialDriver.ttys)
239 return -ENOMEM;
240
241 kref_init(&brd->SerialDriver.kref);
242 brd->SerialDriver.termios = kcalloc(brd->maxports, sizeof(*brd->SerialDriver.termios), GFP_KERNEL);
243 if (!brd->SerialDriver.termios)
244 return -ENOMEM;
245
246 /*
247 * Entry points for driver. Called by the kernel from
248 * tty_io.c and n_tty.c.
249 */
250 tty_set_operations(&brd->SerialDriver, &dgnc_tty_ops);
251
252 if (!brd->dgnc_Major_Serial_Registered) {
253 /* Register tty devices */
254 rc = tty_register_driver(&brd->SerialDriver);
255 if (rc < 0) {
256 APR(("Can't register tty device (%d)\n", rc));
257 return rc;
258 }
259 brd->dgnc_Major_Serial_Registered = TRUE;
260 }
261
262 /*
263 * If we're doing transparent print, we have to do all of the above
264 * again, separately so we don't get the LD confused about what major
265 * we are when we get into the dgnc_tty_open() routine.
266 */
267 brd->PrintDriver.magic = TTY_DRIVER_MAGIC;
268 snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgnc_%d_", brd->boardnum);
269
270 brd->PrintDriver.name = brd->PrintName;
271 brd->PrintDriver.name_base = 0;
272 brd->PrintDriver.major = brd->SerialDriver.major;
273 brd->PrintDriver.minor_start = 0x80;
274 brd->PrintDriver.num = brd->maxports;
275 brd->PrintDriver.type = TTY_DRIVER_TYPE_SERIAL;
276 brd->PrintDriver.subtype = SERIAL_TYPE_NORMAL;
277 brd->PrintDriver.init_termios = DgncDefaultTermios;
278 brd->PrintDriver.driver_name = DRVSTR;
279 brd->PrintDriver.flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
280
281 /*
282 * The kernel wants space to store pointers to
283 * tty_struct's and termios's. Must be separated from
284 * the Serial Driver so we don't get confused
285 */
286 brd->PrintDriver.ttys = kcalloc(brd->maxports, sizeof(*brd->PrintDriver.ttys), GFP_KERNEL);
287 if (!brd->PrintDriver.ttys)
288 return -ENOMEM;
289 kref_init(&brd->PrintDriver.kref);
290 brd->PrintDriver.termios = kcalloc(brd->maxports, sizeof(*brd->PrintDriver.termios), GFP_KERNEL);
291 if (!brd->PrintDriver.termios)
292 return -ENOMEM;
293
294 /*
295 * Entry points for driver. Called by the kernel from
296 * tty_io.c and n_tty.c.
297 */
298 tty_set_operations(&brd->PrintDriver, &dgnc_tty_ops);
299
300 if (!brd->dgnc_Major_TransparentPrint_Registered) {
301 /* Register Transparent Print devices */
302 rc = tty_register_driver(&brd->PrintDriver);
303 if (rc < 0) {
304 APR(("Can't register Transparent Print device (%d)\n", rc));
305 return rc;
306 }
307 brd->dgnc_Major_TransparentPrint_Registered = TRUE;
308 }
309
310 dgnc_BoardsByMajor[brd->SerialDriver.major] = brd;
311 brd->dgnc_Serial_Major = brd->SerialDriver.major;
312 brd->dgnc_TransparentPrint_Major = brd->PrintDriver.major;
313
314 return rc;
315 }
316
317
318 /*
319 * dgnc_tty_init()
320 *
321 * Init the tty subsystem. Called once per board after board has been
322 * downloaded and init'ed.
323 */
324 int dgnc_tty_init(struct dgnc_board *brd)
325 {
326 int i;
327 void __iomem *vaddr;
328 struct channel_t *ch;
329
330 if (!brd)
331 return -ENXIO;
332
333 /*
334 * Initialize board structure elements.
335 */
336
337 vaddr = brd->re_map_membase;
338
339 brd->nasync = brd->maxports;
340
341 /*
342 * Allocate channel memory that might not have been allocated
343 * when the driver was first loaded.
344 */
345 for (i = 0; i < brd->nasync; i++) {
346 if (!brd->channels[i]) {
347
348 /*
349 * Okay to malloc with GFP_KERNEL, we are not at
350 * interrupt context, and there are no locks held.
351 */
352 brd->channels[i] = kzalloc(sizeof(*brd->channels[i]), GFP_KERNEL);
353 }
354 }
355
356 ch = brd->channels[0];
357 vaddr = brd->re_map_membase;
358
359 /* Set up channel variables */
360 for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
361
362 if (!brd->channels[i])
363 continue;
364
365 spin_lock_init(&ch->ch_lock);
366
367 /* Store all our magic numbers */
368 ch->magic = DGNC_CHANNEL_MAGIC;
369 ch->ch_tun.magic = DGNC_UNIT_MAGIC;
370 ch->ch_tun.un_ch = ch;
371 ch->ch_tun.un_type = DGNC_SERIAL;
372 ch->ch_tun.un_dev = i;
373
374 ch->ch_pun.magic = DGNC_UNIT_MAGIC;
375 ch->ch_pun.un_ch = ch;
376 ch->ch_pun.un_type = DGNC_PRINT;
377 ch->ch_pun.un_dev = i + 128;
378
379 if (brd->bd_uart_offset == 0x200)
380 ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i);
381 else
382 ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i);
383
384 ch->ch_bd = brd;
385 ch->ch_portnum = i;
386 ch->ch_digi = dgnc_digi_init;
387
388 /* .25 second delay */
389 ch->ch_close_delay = 250;
390
391 init_waitqueue_head(&ch->ch_flags_wait);
392 init_waitqueue_head(&ch->ch_tun.un_flags_wait);
393 init_waitqueue_head(&ch->ch_pun.un_flags_wait);
394 init_waitqueue_head(&ch->ch_sniff_wait);
395
396 {
397 struct device *classp;
398
399 classp = tty_register_device(&brd->SerialDriver, i,
400 &(ch->ch_bd->pdev->dev));
401 ch->ch_tun.un_sysfs = classp;
402 dgnc_create_tty_sysfs(&ch->ch_tun, classp);
403
404 classp = tty_register_device(&brd->PrintDriver, i,
405 &(ch->ch_bd->pdev->dev));
406 ch->ch_pun.un_sysfs = classp;
407 dgnc_create_tty_sysfs(&ch->ch_pun, classp);
408 }
409
410 }
411
412 return 0;
413 }
414
415
416 /*
417 * dgnc_tty_post_uninit()
418 *
419 * UnInitialize any global tty related data.
420 */
421 void dgnc_tty_post_uninit(void)
422 {
423 kfree(dgnc_TmpWriteBuf);
424 dgnc_TmpWriteBuf = NULL;
425 }
426
427
428 /*
429 * dgnc_tty_uninit()
430 *
431 * Uninitialize the TTY portion of this driver. Free all memory and
432 * resources.
433 */
434 void dgnc_tty_uninit(struct dgnc_board *brd)
435 {
436 int i = 0;
437
438 if (brd->dgnc_Major_Serial_Registered) {
439 dgnc_BoardsByMajor[brd->SerialDriver.major] = NULL;
440 brd->dgnc_Serial_Major = 0;
441 for (i = 0; i < brd->nasync; i++) {
442 dgnc_remove_tty_sysfs(brd->channels[i]->ch_tun.un_sysfs);
443 tty_unregister_device(&brd->SerialDriver, i);
444 }
445 tty_unregister_driver(&brd->SerialDriver);
446 brd->dgnc_Major_Serial_Registered = FALSE;
447 }
448
449 if (brd->dgnc_Major_TransparentPrint_Registered) {
450 dgnc_BoardsByMajor[brd->PrintDriver.major] = NULL;
451 brd->dgnc_TransparentPrint_Major = 0;
452 for (i = 0; i < brd->nasync; i++) {
453 dgnc_remove_tty_sysfs(brd->channels[i]->ch_pun.un_sysfs);
454 tty_unregister_device(&brd->PrintDriver, i);
455 }
456 tty_unregister_driver(&brd->PrintDriver);
457 brd->dgnc_Major_TransparentPrint_Registered = FALSE;
458 }
459
460 kfree(brd->SerialDriver.ttys);
461 brd->SerialDriver.ttys = NULL;
462 kfree(brd->PrintDriver.ttys);
463 brd->PrintDriver.ttys = NULL;
464 }
465
466
467 #define TMPBUFLEN (1024)
468
469 /*
470 * dgnc_sniff - Dump data out to the "sniff" buffer if the
471 * proc sniff file is opened...
472 */
void dgnc_sniff_nowait_nolock(struct channel_t *ch, unsigned char *text, unsigned char *buf, int len)
{
	struct timeval tv;
	int n;			/* bytes we can push into the sniff ring this pass */
	int r;			/* bytes until the sniff ring wraps */
	int nbuf;		/* formatted bytes still to be pushed */
	int i;
	int tmpbuflen;		/* bytes of scratch buffer used so far */
	char *tmpbuf;		/* scratch buffer for hex formatting */
	char *p;		/* cursor into tmpbuf */
	int too_much_data;	/* set when "len" didn't fit in one scratch pass */

	/* GFP_ATOMIC: per the comment below, we may be called from
	 * interrupt/timer context and must not sleep. */
	tmpbuf = kzalloc(TMPBUFLEN, GFP_ATOMIC);
	if (!tmpbuf)
		return;
	p = tmpbuf;

	/* Leave if sniff not open */
	if (!(ch->ch_sniff_flags & SNIFF_OPEN))
		goto exit;

	do_gettimeofday(&tv);

	/* Create our header for data dump: "<sec usec><label><" */
	p += sprintf(p, "<%ld %ld><%s><", tv.tv_sec, tv.tv_usec, text);
	tmpbuflen = p - tmpbuf;

	do {
		too_much_data = 0;

		/* Hex-format input bytes while scratch space remains
		 * (keep 4 bytes headroom for the ">\n" terminator). */
		for (i = 0; i < len && tmpbuflen < (TMPBUFLEN - 4); i++) {
			p += sprintf(p, "%02x ", *buf);
			buf++;
			tmpbuflen = p - tmpbuf;
		}

		if (tmpbuflen < (TMPBUFLEN - 4)) {
			/* Everything fit: terminate the record.  When at
			 * least one byte was printed, back up one to
			 * overwrite the trailing space. */
			if (i > 0)
				p += sprintf(p - 1, "%s\n", ">");
			else
				p += sprintf(p, "%s\n", ">");
		} else {
			/* Scratch buffer full; push what we have and loop
			 * again for the remaining input. */
			too_much_data = 1;
			len -= i;
		}

		nbuf = strlen(tmpbuf);
		p = tmpbuf;

		/*
		 * Loop while data remains.
		 */
		while (nbuf > 0 && ch->ch_sniff_buf) {
			/*
			 * Determine the amount of available space left in the
			 * buffer. If there's none, wait until some appears.
			 */
			n = (ch->ch_sniff_out - ch->ch_sniff_in - 1) & SNIFF_MASK;

			/*
			 * If there is no space left to write to in our sniff buffer,
			 * we have no choice but to drop the data.
			 * We *cannot* sleep here waiting for space, because this
			 * function was probably called by the interrupt/timer routines!
			 */
			if (n == 0)
				goto exit;

			/*
			 * Copy as much data as will fit.
			 */

			if (n > nbuf)
				n = nbuf;

			/* Distance to the physical end of the ring */
			r = SNIFF_MAX - ch->ch_sniff_in;

			if (r <= n) {
				/* Copy wraps: first leg up to the end, then
				 * continue from index 0 below. */
				memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, r);

				n -= r;
				ch->ch_sniff_in = 0;
				p += r;
				nbuf -= r;
			}

			memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, n);

			ch->ch_sniff_in += n;
			p += n;
			nbuf -= n;

			/*
			 * Wakeup any thread waiting for data
			 */
			if (ch->ch_sniff_flags & SNIFF_WAIT_DATA) {
				ch->ch_sniff_flags &= ~SNIFF_WAIT_DATA;
				wake_up_interruptible(&ch->ch_sniff_wait);
			}
		}

		/*
		 * If the user sent us too much data to push into our tmpbuf,
		 * we need to keep looping around on all the data.
		 */
		if (too_much_data) {
			p = tmpbuf;
			tmpbuflen = 0;
		}

	} while (too_much_data);

exit:
	kfree(tmpbuf);
}
588
589
590 /*=======================================================================
591 *
592 * dgnc_wmove - Write data to transmit queue.
593 *
594 * ch - Pointer to channel structure.
 *	buf	- Pointer to characters to be moved.
596 * n - Number of characters to move.
597 *
598 *=======================================================================*/
599 static void dgnc_wmove(struct channel_t *ch, char *buf, uint n)
600 {
601 int remain;
602 uint head;
603
604 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
605 return;
606
607 head = ch->ch_w_head & WQUEUEMASK;
608
609 /*
610 * If the write wraps over the top of the circular buffer,
611 * move the portion up to the wrap point, and reset the
612 * pointers to the bottom.
613 */
614 remain = WQUEUESIZE - head;
615
616 if (n >= remain) {
617 n -= remain;
618 memcpy(ch->ch_wqueue + head, buf, remain);
619 head = 0;
620 buf += remain;
621 }
622
623 if (n > 0) {
624 /*
625 * Move rest of data.
626 */
627 remain = n;
628 memcpy(ch->ch_wqueue + head, buf, remain);
629 head += remain;
630 }
631
632 head &= WQUEUEMASK;
633 ch->ch_w_head = head;
634 }
635
636
637
638
639 /*=======================================================================
640 *
641 * dgnc_input - Process received data.
642 *
643 * ch - Pointer to channel structure.
644 *
645 *=======================================================================*/
void dgnc_input(struct channel_t *ch)
{
	struct dgnc_board *bd;
	struct tty_struct *tp;		/* serial unit's tty */
	struct tty_ldisc *ld;		/* line discipline reference, may be NULL */
	uint rmask;			/* receive-queue index mask */
	ushort head;
	ushort tail;
	int data_len;			/* bytes pending in our receive queue */
	unsigned long flags;
	int flip_len;
	int len = 0;			/* bytes we intend to hand to the tty layer */
	int n = 0;			/* bytes still to copy this call */
	int s = 0;			/* contiguous run length before the queue wraps */
	int i = 0;

	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
		return;

	tp = ch->ch_tun.un_tty;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
		return;

	spin_lock_irqsave(&ch->ch_lock, flags);

	/*
	 * Figure the number of characters in the buffer.
	 * Exit immediately if none.
	 */
	rmask = RQUEUEMASK;
	head = ch->ch_r_head & rmask;
	tail = ch->ch_r_tail & rmask;
	data_len = (head - tail) & rmask;

	if (data_len == 0) {
		spin_unlock_irqrestore(&ch->ch_lock, flags);
		return;
	}

	/*
	 * If the device is not open, or CREAD is off,
	 * flush input data and return immediately.
	 */
	if (!tp || (tp->magic != TTY_MAGIC) || !(ch->ch_tun.un_flags & UN_ISOPEN) ||
	    !(tp->termios.c_cflag & CREAD) || (ch->ch_tun.un_flags & UN_CLOSING)) {

		/* Discard pending data by advancing head to tail */
		ch->ch_r_head = tail;

		/* Force queue flow control to be released, if needed */
		dgnc_check_queue_flow_control(ch);

		spin_unlock_irqrestore(&ch->ch_lock, flags);
		return;
	}

	/*
	 * If we are throttled, simply don't read any data.
	 */
	if (ch->ch_flags & CH_FORCED_STOPI) {
		spin_unlock_irqrestore(&ch->ch_lock, flags);
		return;
	}

	flip_len = TTY_FLIPBUF_SIZE;

	/* Chop down the length, if needed */
	len = min(data_len, flip_len);
	len = min(len, (N_TTY_BUF_SIZE - 1));

	ld = tty_ldisc_ref(tp);

#ifdef TTY_DONT_FLIP
	/*
	 * If the DONT_FLIP flag is on, don't flush our buffer, and act
	 * like the ld doesn't have any space to put the data right now.
	 */
	if (test_bit(TTY_DONT_FLIP, &tp->flags))
		len = 0;
#endif

	/*
	 * If we were unable to get a reference to the ld,
	 * don't flush our buffer, and act like the ld doesn't
	 * have any space to put the data right now.
	 */
	if (!ld) {
		len = 0;
	} else {
		/*
		 * If ld doesn't have a pointer to a receive_buf function,
		 * flush the data, then act like the ld doesn't have any
		 * space to put the data right now.
		 */
		if (!ld->ops->receive_buf) {
			ch->ch_r_head = ch->ch_r_tail;
			len = 0;
		}
	}

	if (len <= 0) {
		spin_unlock_irqrestore(&ch->ch_lock, flags);
		if (ld)
			tty_ldisc_deref(ld);
		return;
	}

	/*
	 * The tty layer in the kernel has changed in 2.6.16+.
	 *
	 * The flip buffers in the tty structure are no longer exposed,
	 * and probably will be going away eventually.
	 *
	 * If we are completely raw, we don't need to go through a lot
	 * of the tty layers that exist.
	 * In this case, we take the shortest and fastest route we
	 * can to relay the data to the user.
	 *
	 * On the other hand, if we are not raw, we need to go through
	 * the new 2.6.16+ tty layer, which has its API more well defined.
	 */
	len = tty_buffer_request_room(tp->port, len);
	n = len;

	/*
	 * n now contains the most amount of data we can copy,
	 * bounded either by how much the Linux tty layer can handle,
	 * or the amount of data the card actually has pending...
	 */
	while (n) {
		/* Longest contiguous run from tail before the ring wraps */
		s = ((head >= tail) ? head : RQUEUESIZE) - tail;
		s = min(s, n);

		if (s <= 0)
			break;

		/*
		 * If conditions are such that ld needs to see all
		 * UART errors, we will have to walk each character
		 * and error byte and send them to the buffer one at
		 * a time.
		 */
		if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
			/* ch_equeue holds the per-byte LSR error flags
			 * matching ch_rqueue's data bytes */
			for (i = 0; i < s; i++) {
				if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
					tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_BREAK);
				else if (*(ch->ch_equeue + tail + i) & UART_LSR_PE)
					tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_PARITY);
				else if (*(ch->ch_equeue + tail + i) & UART_LSR_FE)
					tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_FRAME);
				else
					tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_NORMAL);
			}
		} else {
			tty_insert_flip_string(tp->port, ch->ch_rqueue + tail, s);
		}

		dgnc_sniff_nowait_nolock(ch, "USER READ", ch->ch_rqueue + tail, s);

		tail += s;
		n -= s;
		/* Flip queue if needed */
		tail &= rmask;
	}

	/* Data and error queues advance in lockstep */
	ch->ch_r_tail = tail & rmask;
	ch->ch_e_tail = tail & rmask;
	dgnc_check_queue_flow_control(ch);
	spin_unlock_irqrestore(&ch->ch_lock, flags);

	/* Tell the tty layer its okay to "eat" the data now */
	tty_flip_buffer_push(tp->port);

	if (ld)
		tty_ldisc_deref(ld);
}
823
824
825 /************************************************************************
826 * Determines when CARRIER changes state and takes appropriate
827 * action.
828 ************************************************************************/
829 void dgnc_carrier(struct channel_t *ch)
830 {
831 struct dgnc_board *bd;
832
833 int virt_carrier = 0;
834 int phys_carrier = 0;
835
836 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
837 return;
838
839 bd = ch->ch_bd;
840
841 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
842 return;
843
844 if (ch->ch_mistat & UART_MSR_DCD)
845 phys_carrier = 1;
846
847 if (ch->ch_digi.digi_flags & DIGI_FORCEDCD)
848 virt_carrier = 1;
849
850 if (ch->ch_c_cflag & CLOCAL)
851 virt_carrier = 1;
852
853 /*
854 * Test for a VIRTUAL carrier transition to HIGH.
855 */
856 if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
857
858 /*
859 * When carrier rises, wake any threads waiting
860 * for carrier in the open routine.
861 */
862
863 if (waitqueue_active(&(ch->ch_flags_wait)))
864 wake_up_interruptible(&ch->ch_flags_wait);
865 }
866
867 /*
868 * Test for a PHYSICAL carrier transition to HIGH.
869 */
870 if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
871
872 /*
873 * When carrier rises, wake any threads waiting
874 * for carrier in the open routine.
875 */
876
877 if (waitqueue_active(&(ch->ch_flags_wait)))
878 wake_up_interruptible(&ch->ch_flags_wait);
879 }
880
881 /*
882 * Test for a PHYSICAL transition to low, so long as we aren't
883 * currently ignoring physical transitions (which is what "virtual
884 * carrier" indicates).
885 *
886 * The transition of the virtual carrier to low really doesn't
887 * matter... it really only means "ignore carrier state", not
888 * "make pretend that carrier is there".
889 */
890 if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) &&
891 (phys_carrier == 0)) {
892
893 /*
894 * When carrier drops:
895 *
896 * Drop carrier on all open units.
897 *
898 * Flush queues, waking up any task waiting in the
899 * line discipline.
900 *
901 * Send a hangup to the control terminal.
902 *
903 * Enable all select calls.
904 */
905 if (waitqueue_active(&(ch->ch_flags_wait)))
906 wake_up_interruptible(&ch->ch_flags_wait);
907
908 if (ch->ch_tun.un_open_count > 0)
909 tty_hangup(ch->ch_tun.un_tty);
910
911 if (ch->ch_pun.un_open_count > 0)
912 tty_hangup(ch->ch_pun.un_tty);
913 }
914
915 /*
916 * Make sure that our cached values reflect the current reality.
917 */
918 if (virt_carrier == 1)
919 ch->ch_flags |= CH_FCAR;
920 else
921 ch->ch_flags &= ~CH_FCAR;
922
923 if (phys_carrier == 1)
924 ch->ch_flags |= CH_CD;
925 else
926 ch->ch_flags &= ~CH_CD;
927 }
928
929 /*
930 * Assign the custom baud rate to the channel structure
931 */
932 static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
933 {
934 int testdiv;
935 int testrate_high;
936 int testrate_low;
937 int deltahigh;
938 int deltalow;
939
940 if (newrate <= 0) {
941 ch->ch_custom_speed = 0;
942 return;
943 }
944
945 /*
946 * Since the divisor is stored in a 16-bit integer, we make sure
947 * we don't allow any rates smaller than a 16-bit integer would allow.
948 * And of course, rates above the dividend won't fly.
949 */
950 if (newrate && newrate < ((ch->ch_bd->bd_dividend / 0xFFFF) + 1))
951 newrate = ((ch->ch_bd->bd_dividend / 0xFFFF) + 1);
952
953 if (newrate && newrate > ch->ch_bd->bd_dividend)
954 newrate = ch->ch_bd->bd_dividend;
955
956 if (newrate > 0) {
957 testdiv = ch->ch_bd->bd_dividend / newrate;
958
959 /*
960 * If we try to figure out what rate the board would use
961 * with the test divisor, it will be either equal or higher
962 * than the requested baud rate. If we then determine the
963 * rate with a divisor one higher, we will get the next lower
964 * supported rate below the requested.
965 */
966 testrate_high = ch->ch_bd->bd_dividend / testdiv;
967 testrate_low = ch->ch_bd->bd_dividend / (testdiv + 1);
968
969 /*
970 * If the rate for the requested divisor is correct, just
971 * use it and be done.
972 */
973 if (testrate_high != newrate) {
974 /*
975 * Otherwise, pick the rate that is closer (i.e. whichever rate
976 * has a smaller delta).
977 */
978 deltahigh = testrate_high - newrate;
979 deltalow = newrate - testrate_low;
980
981 if (deltahigh < deltalow)
982 newrate = testrate_high;
983 else
984 newrate = testrate_low;
985 }
986 }
987
988 ch->ch_custom_speed = newrate;
989 }
990
991
/*
 * Inspect how full the channel's receive queue is and enforce or relax
 * input flow control to match.  Called from dgnc_input() with the
 * channel lock held.
 */
void dgnc_check_queue_flow_control(struct channel_t *ch)
{
	int qleft = 0;

	/* Store how much space we have left in the queue */
	qleft = ch->ch_r_tail - ch->ch_r_head - 1;
	if (qleft < 0)
		qleft += RQUEUEMASK + 1;	/* head wrapped past tail */

	/*
	 * Check to see if we should enforce flow control on our queue because
	 * the ld (or user) isn't reading data out of our queue fast enough.
	 *
	 * NOTE: This is done based on what the current flow control of the
	 * port is set for.
	 *
	 * 1) HWFLOW (RTS) - Turn off the UART's Receive interrupt.
	 *	This will cause the UART's FIFO to back up, and force
	 *	the RTS signal to be dropped.
	 * 2) SWFLOW (IXOFF) - Keep trying to send a stop character to
	 *	the other side, in hopes it will stop sending data to us.
	 * 3) NONE - Nothing we can do.  We will simply drop any extra data
	 *	that gets sent into us when the queue fills up.
	 */
	if (qleft < 256) {
		/* HWFLOW */
		if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
			if (!(ch->ch_flags & CH_RECEIVER_OFF)) {
				ch->ch_bd->bd_ops->disable_receiver(ch);
				ch->ch_flags |= (CH_RECEIVER_OFF);
			}
		}
		/* SWFLOW */
		else if (ch->ch_c_iflag & IXOFF) {
			/* cap repeated XOFFs at MAX_STOPS_SENT */
			if (ch->ch_stops_sent <= MAX_STOPS_SENT) {
				ch->ch_bd->bd_ops->send_stop_character(ch);
				ch->ch_stops_sent++;
			}
		}
		/* No FLOW */
		else {
			/* Empty... Can't do anything about the impending overflow... */
		}
	}

	/*
	 * Check to see if we should relax flow control because the
	 * ld (or user) finally read enough data out of our queue.
	 *
	 * NOTE: This is done based on what the current flow control of the
	 * port is set for.
	 *
	 * 1) HWFLOW (RTS) - Turn back on the UART's Receive interrupt.
	 *	This will cause the UART's FIFO to raise RTS back up,
	 *	which will allow the other side to start sending data again.
	 * 2) SWFLOW (IXOFF) - Send a start character to
	 *	the other side, so it will start sending data to us again.
	 * 3) NONE - Do nothing.  Since we didn't do anything to turn off the
	 *	other side, we don't need to do anything now.
	 */
	if (qleft > (RQUEUESIZE / 2)) {
		/* HWFLOW */
		if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
			if (ch->ch_flags & CH_RECEIVER_OFF) {
				ch->ch_bd->bd_ops->enable_receiver(ch);
				ch->ch_flags &= ~(CH_RECEIVER_OFF);
			}
		}
		/* SWFLOW */
		else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) {
			ch->ch_stops_sent = 0;
			ch->ch_bd->bd_ops->send_start_character(ch);
		}
		/* No FLOW */
		else {
			/* Nothing needed. */
		}
	}
}
1071
1072
/*
 * Wake up anything blocked on the channel's write path once the
 * transmit queue has room again, and finish the UN_EMPTY / RTS-DTR
 * toggle bookkeeping when the queue and UART have fully drained.
 */
void dgnc_wakeup_writes(struct channel_t *ch)
{
	int qlen = 0;
	unsigned long flags;

	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
		return;

	spin_lock_irqsave(&ch->ch_lock, flags);

	/*
	 * If channel now has space, wake up anyone waiting on the condition.
	 */
	qlen = ch->ch_w_head - ch->ch_w_tail;
	if (qlen < 0)
		qlen += WQUEUESIZE;	/* head wrapped past tail */

	/* Queue still (nearly) full: nothing worth waking anyone for */
	if (qlen >= (WQUEUESIZE - 256)) {
		spin_unlock_irqrestore(&ch->ch_lock, flags);
		return;
	}

	if (ch->ch_tun.un_flags & UN_ISOPEN) {
		if ((ch->ch_tun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
			ch->ch_tun.un_tty->ldisc->ops->write_wakeup) {
			/* Drop the channel lock while calling into the line
			 * discipline, which may re-enter driver code. */
			spin_unlock_irqrestore(&ch->ch_lock, flags);
			(ch->ch_tun.un_tty->ldisc->ops->write_wakeup)(ch->ch_tun.un_tty);
			spin_lock_irqsave(&ch->ch_lock, flags);
		}

		wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);

		/*
		 * If unit is set to wait until empty, check to make sure
		 * the queue AND FIFO are both empty.
		 */
		if (ch->ch_tun.un_flags & UN_EMPTY) {
			if ((qlen == 0) && (ch->ch_bd->bd_ops->get_uart_bytes_left(ch) == 0)) {
				ch->ch_tun.un_flags &= ~(UN_EMPTY);

				/*
				 * If RTS Toggle mode is on, whenever
				 * the queue and UART is empty, keep RTS low.
				 */
				if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
					ch->ch_mostat &= ~(UART_MCR_RTS);
					ch->ch_bd->bd_ops->assert_modem_signals(ch);
				}

				/*
				 * If DTR Toggle mode is on, whenever
				 * the queue and UART is empty, keep DTR low.
				 */
				if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
					ch->ch_mostat &= ~(UART_MCR_DTR);
					ch->ch_bd->bd_ops->assert_modem_signals(ch);
				}
			}
		}

		wake_up_interruptible(&ch->ch_tun.un_flags_wait);
	}

	if (ch->ch_pun.un_flags & UN_ISOPEN) {
		if ((ch->ch_pun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
			ch->ch_pun.un_tty->ldisc->ops->write_wakeup) {
			/* Same lock-drop dance for the print unit's ldisc */
			spin_unlock_irqrestore(&ch->ch_lock, flags);
			(ch->ch_pun.un_tty->ldisc->ops->write_wakeup)(ch->ch_pun.un_tty);
			spin_lock_irqsave(&ch->ch_lock, flags);
		}

		wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);

		/*
		 * If unit is set to wait until empty, check to make sure
		 * the queue AND FIFO are both empty.
		 */
		if (ch->ch_pun.un_flags & UN_EMPTY) {
			if ((qlen == 0) && (ch->ch_bd->bd_ops->get_uart_bytes_left(ch) == 0))
				ch->ch_pun.un_flags &= ~(UN_EMPTY);
		}

		wake_up_interruptible(&ch->ch_pun.un_flags_wait);
	}

	spin_unlock_irqrestore(&ch->ch_lock, flags);
}
1160
1161
1162
1163 /************************************************************************
1164 *
1165 * TTY Entry points and helper functions
1166 *
1167 ************************************************************************/
1168
1169 /*
1170 * dgnc_tty_open()
1171 *
1172 */
1173 static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
1174 {
1175 struct dgnc_board *brd;
1176 struct channel_t *ch;
1177 struct un_t *un;
1178 uint major = 0;
1179 uint minor = 0;
1180 int rc = 0;
1181 unsigned long flags;
1182
1183 rc = 0;
1184
1185 major = MAJOR(tty_devnum(tty));
1186 minor = MINOR(tty_devnum(tty));
1187
1188 if (major > 255)
1189 return -ENXIO;
1190
1191 /* Get board pointer from our array of majors we have allocated */
1192 brd = dgnc_BoardsByMajor[major];
1193 if (!brd)
1194 return -ENXIO;
1195
1196 /*
1197 * If board is not yet up to a state of READY, go to
1198 * sleep waiting for it to happen or they cancel the open.
1199 */
1200 rc = wait_event_interruptible(brd->state_wait,
1201 (brd->state & BOARD_READY));
1202
1203 if (rc)
1204 return rc;
1205
1206 spin_lock_irqsave(&brd->bd_lock, flags);
1207
1208 /* If opened device is greater than our number of ports, bail. */
1209 if (PORT_NUM(minor) > brd->nasync) {
1210 spin_unlock_irqrestore(&brd->bd_lock, flags);
1211 return -ENXIO;
1212 }
1213
1214 ch = brd->channels[PORT_NUM(minor)];
1215 if (!ch) {
1216 spin_unlock_irqrestore(&brd->bd_lock, flags);
1217 return -ENXIO;
1218 }
1219
1220 /* Drop board lock */
1221 spin_unlock_irqrestore(&brd->bd_lock, flags);
1222
1223 /* Grab channel lock */
1224 spin_lock_irqsave(&ch->ch_lock, flags);
1225
1226 /* Figure out our type */
1227 if (!IS_PRINT(minor)) {
1228 un = &brd->channels[PORT_NUM(minor)]->ch_tun;
1229 un->un_type = DGNC_SERIAL;
1230 } else if (IS_PRINT(minor)) {
1231 un = &brd->channels[PORT_NUM(minor)]->ch_pun;
1232 un->un_type = DGNC_PRINT;
1233 } else {
1234 spin_unlock_irqrestore(&ch->ch_lock, flags);
1235 return -ENXIO;
1236 }
1237
1238 /*
1239 * If the port is still in a previous open, and in a state
1240 * where we simply cannot safely keep going, wait until the
1241 * state clears.
1242 */
1243 spin_unlock_irqrestore(&ch->ch_lock, flags);
1244
1245 rc = wait_event_interruptible(ch->ch_flags_wait, ((ch->ch_flags & CH_OPENING) == 0));
1246
1247 /* If ret is non-zero, user ctrl-c'ed us */
1248 if (rc)
1249 return -EINTR;
1250
1251 /*
1252 * If either unit is in the middle of the fragile part of close,
1253 * we just cannot touch the channel safely.
1254 * Go to sleep, knowing that when the channel can be
1255 * touched safely, the close routine will signal the
1256 * ch_flags_wait to wake us back up.
1257 */
1258 rc = wait_event_interruptible(ch->ch_flags_wait,
1259 (((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING) == 0));
1260
1261 /* If ret is non-zero, user ctrl-c'ed us */
1262 if (rc)
1263 return -EINTR;
1264
1265 spin_lock_irqsave(&ch->ch_lock, flags);
1266
1267
1268 /* Store our unit into driver_data, so we always have it available. */
1269 tty->driver_data = un;
1270
1271
1272 /*
1273 * Initialize tty's
1274 */
1275 if (!(un->un_flags & UN_ISOPEN)) {
1276 /* Store important variables. */
1277 un->un_tty = tty;
1278
1279 /* Maybe do something here to the TTY struct as well? */
1280 }
1281
1282
1283 /*
1284 * Allocate channel buffers for read/write/error.
1285 * Set flag, so we don't get trounced on.
1286 */
1287 ch->ch_flags |= (CH_OPENING);
1288
1289 /* Drop locks, as malloc with GFP_KERNEL can sleep */
1290 spin_unlock_irqrestore(&ch->ch_lock, flags);
1291
1292 if (!ch->ch_rqueue)
1293 ch->ch_rqueue = kzalloc(RQUEUESIZE, GFP_KERNEL);
1294 if (!ch->ch_equeue)
1295 ch->ch_equeue = kzalloc(EQUEUESIZE, GFP_KERNEL);
1296 if (!ch->ch_wqueue)
1297 ch->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL);
1298
1299 spin_lock_irqsave(&ch->ch_lock, flags);
1300
1301 ch->ch_flags &= ~(CH_OPENING);
1302 wake_up_interruptible(&ch->ch_flags_wait);
1303
1304 /*
1305 * Initialize if neither terminal or printer is open.
1306 */
1307 if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
1308
1309 /*
1310 * Flush input queues.
1311 */
1312 ch->ch_r_head = 0;
1313 ch->ch_r_tail = 0;
1314 ch->ch_e_head = 0;
1315 ch->ch_e_tail = 0;
1316 ch->ch_w_head = 0;
1317 ch->ch_w_tail = 0;
1318
1319 brd->bd_ops->flush_uart_write(ch);
1320 brd->bd_ops->flush_uart_read(ch);
1321
1322 ch->ch_flags = 0;
1323 ch->ch_cached_lsr = 0;
1324 ch->ch_stop_sending_break = 0;
1325 ch->ch_stops_sent = 0;
1326
1327 ch->ch_c_cflag = tty->termios.c_cflag;
1328 ch->ch_c_iflag = tty->termios.c_iflag;
1329 ch->ch_c_oflag = tty->termios.c_oflag;
1330 ch->ch_c_lflag = tty->termios.c_lflag;
1331 ch->ch_startc = tty->termios.c_cc[VSTART];
1332 ch->ch_stopc = tty->termios.c_cc[VSTOP];
1333
1334 /*
1335 * Bring up RTS and DTR...
1336 * Also handle RTS or DTR toggle if set.
1337 */
1338 if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
1339 ch->ch_mostat |= (UART_MCR_RTS);
1340 if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
1341 ch->ch_mostat |= (UART_MCR_DTR);
1342
1343 /* Tell UART to init itself */
1344 brd->bd_ops->uart_init(ch);
1345 }
1346
1347 /*
1348 * Run param in case we changed anything
1349 */
1350 brd->bd_ops->param(tty);
1351
1352 dgnc_carrier(ch);
1353
1354 /*
1355 * follow protocol for opening port
1356 */
1357
1358 spin_unlock_irqrestore(&ch->ch_lock, flags);
1359
1360 rc = dgnc_block_til_ready(tty, file, ch);
1361
1362 /* No going back now, increment our unit and channel counters */
1363 spin_lock_irqsave(&ch->ch_lock, flags);
1364 ch->ch_open_count++;
1365 un->un_open_count++;
1366 un->un_flags |= (UN_ISOPEN);
1367 spin_unlock_irqrestore(&ch->ch_lock, flags);
1368
1369 return rc;
1370 }
1371
1372
/*
 * dgnc_block_til_ready()
 *
 * Wait for DCD, if needed.
 *
 * Called from dgnc_tty_open() after the channel is set up.  Sleeps until
 * one of the "clean exit" conditions holds (non-blocking open, forced or
 * real carrier) or an error condition ends the wait.
 *
 * Returns 0 on success, or a negative errno:
 *   -ENXIO       bad arguments, or the board failed while we slept
 *   -EAGAIN      the tty was hung up
 *   -EIO         the tty is flagged for I/O errors
 *   -ERESTARTSYS a signal interrupted the wait
 */
static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch)
{
	int retval = 0;
	struct un_t *un = NULL;
	unsigned long flags;
	uint old_flags = 0;
	int sleep_on_un_flags = 0;

	if (!tty || tty->magic != TTY_MAGIC || !file || !ch || ch->magic != DGNC_CHANNEL_MAGIC)
		return -ENXIO;

	un = tty->driver_data;
	if (!un || un->magic != DGNC_UNIT_MAGIC)
		return -ENXIO;

	spin_lock_irqsave(&ch->ch_lock, flags);

	/* Count ourselves as a waiting open so close/carrier code knows. */
	ch->ch_wopen++;

	/* Loop forever */
	while (1) {

		sleep_on_un_flags = 0;

		/*
		 * If board has failed somehow during our sleep, bail with error.
		 */
		if (ch->ch_bd->state == BOARD_FAILED) {
			retval = -ENXIO;
			break;
		}

		/* If tty was hung up, break out of loop and set error. */
		if (tty_hung_up_p(file)) {
			retval = -EAGAIN;
			break;
		}

		/*
		 * If either unit is in the middle of the fragile part of close,
		 * we just cannot touch the channel safely.
		 * Go back to sleep, knowing that when the channel can be
		 * touched safely, the close routine will signal the
		 * ch_wait_flags to wake us back up.
		 */
		if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING)) {

			/*
			 * Our conditions to leave cleanly and happily:
			 * 1) NONBLOCKING on the tty is set.
			 * 2) CLOCAL is set.
			 * 3) DCD (fake or real) is active.
			 */

			if (file->f_flags & O_NONBLOCK)
				break;

			if (tty->flags & (1 << TTY_IO_ERROR)) {
				retval = -EIO;
				break;
			}

			/* Real carrier detected. */
			if (ch->ch_flags & CH_CD)
				break;

			/* Forced (fake) carrier. */
			if (ch->ch_flags & CH_FCAR)
				break;
		} else {
			sleep_on_un_flags = 1;
		}

		/*
		 * If there is a signal pending, the user probably
		 * interrupted (ctrl-c) us.
		 * Leave loop with error set.
		 */
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}

		/*
		 * Store the flags before we let go of channel lock
		 */
		if (sleep_on_un_flags)
			old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
		else
			old_flags = ch->ch_flags;

		/*
		 * Let go of channel lock before calling schedule.
		 * Our poller will get any FEP events and wake us up when DCD
		 * eventually goes active.
		 */
		spin_unlock_irqrestore(&ch->ch_lock, flags);

		/*
		 * Wait for something in the flags to change from the current value.
		 */
		if (sleep_on_un_flags)
			retval = wait_event_interruptible(un->un_flags_wait,
				(old_flags != (ch->ch_tun.un_flags | ch->ch_pun.un_flags)));
		else
			retval = wait_event_interruptible(ch->ch_flags_wait,
				(old_flags != ch->ch_flags));

		/*
		 * We got woken up for some reason.
		 * Before looping around, grab our channel lock.
		 */
		spin_lock_irqsave(&ch->ch_lock, flags);
	}

	/* No longer a waiting open. */
	ch->ch_wopen--;

	spin_unlock_irqrestore(&ch->ch_lock, flags);

	if (retval)
		return retval;

	return 0;
}
1501
1502
1503 /*
1504 * dgnc_tty_hangup()
1505 *
1506 * Hangup the port. Like a close, but don't wait for output to drain.
1507 */
1508 static void dgnc_tty_hangup(struct tty_struct *tty)
1509 {
1510 struct un_t *un;
1511
1512 if (!tty || tty->magic != TTY_MAGIC)
1513 return;
1514
1515 un = tty->driver_data;
1516 if (!un || un->magic != DGNC_UNIT_MAGIC)
1517 return;
1518
1519 /* flush the transmit queues */
1520 dgnc_tty_flush_buffer(tty);
1521
1522 }
1523
1524
/*
 * dgnc_tty_close()
 *
 * TTY close entry point.  Balances dgnc_tty_open(): drops the unit and
 * channel open counts and, on the final close of the channel, drains
 * output, optionally lowers RTS/DTR (HUPCL), and shuts the UART down.
 */
static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
{
	struct ktermios *ts;
	struct dgnc_board *bd;
	struct channel_t *ch;
	struct un_t *un;
	unsigned long flags;
	int rc = 0;

	if (!tty || tty->magic != TTY_MAGIC)
		return;

	un = tty->driver_data;
	if (!un || un->magic != DGNC_UNIT_MAGIC)
		return;

	ch = un->un_ch;
	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
		return;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
		return;

	/* NOTE(review): ts is assigned but never read in this function. */
	ts = &tty->termios;

	spin_lock_irqsave(&ch->ch_lock, flags);

	/*
	 * Determine if this is the last close or not - and if we agree about
	 * which type of close it is with the Line Discipline
	 */
	if ((tty->count == 1) && (un->un_open_count != 1)) {
		/*
		 * Uh, oh. tty->count is 1, which means that the tty
		 * structure will be freed. un_open_count should always
		 * be one in these conditions. If it's greater than
		 * one, we've got real problems, since it means the
		 * serial port won't be shutdown.
		 */
		APR(("tty->count is 1, un open count is %d\n", un->un_open_count));
		un->un_open_count = 1;
	}

	if (un->un_open_count)
		un->un_open_count--;
	else
		APR(("bad serial port open count of %d\n", un->un_open_count));

	ch->ch_open_count--;

	/*
	 * Bail while other opens remain on both the channel and the unit.
	 * NOTE(review): this tests ch_open_count && un_open_count —
	 * confirm "&&" (rather than "||") is the intended condition.
	 */
	if (ch->ch_open_count && un->un_open_count) {
		spin_unlock_irqrestore(&ch->ch_lock, flags);
		return;
	}

	/* OK, its the last close on the unit */
	un->un_flags |= UN_CLOSING;

	tty->closing = 1;

	/*
	 * Only officially close channel if count is 0 and
	 * DIGI_PRINTER bit is not set.
	 */
	if ((ch->ch_open_count == 0) && !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {

		ch->ch_flags &= ~(CH_STOPI | CH_FORCED_STOPI);

		/*
		 * turn off print device when closing print device.
		 */
		if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
			dgnc_wmove(ch, ch->ch_digi.digi_offstr,
				(int) ch->ch_digi.digi_offlen);
			ch->ch_flags &= ~CH_PRON;
		}

		spin_unlock_irqrestore(&ch->ch_lock, flags);
		/* wait for output to drain */
		/* This will also return if we take an interrupt */
		/* NOTE(review): the drain() result in rc is ignored. */
		rc = bd->bd_ops->drain(tty, 0);

		dgnc_tty_flush_buffer(tty);
		tty_ldisc_flush(tty);

		spin_lock_irqsave(&ch->ch_lock, flags);

		tty->closing = 0;

		/*
		 * If we have HUPCL set, lower DTR and RTS
		 */
		if (ch->ch_c_cflag & HUPCL) {

			/* Drop RTS/DTR */
			ch->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
			bd->bd_ops->assert_modem_signals(ch);

			/*
			 * Go to sleep to ensure RTS/DTR
			 * have been dropped for modems to see it.
			 */
			if (ch->ch_close_delay) {
				spin_unlock_irqrestore(&ch->ch_lock,
					flags);
				dgnc_ms_sleep(ch->ch_close_delay);
				spin_lock_irqsave(&ch->ch_lock, flags);
			}
		}

		/* Force param() to reprogram the baud rate on next open. */
		ch->ch_old_baud = 0;

		/* Turn off UART interrupts for this port */
		ch->ch_bd->bd_ops->uart_off(ch);
	} else {
		/*
		 * turn off print device when closing print device.
		 */
		if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
			dgnc_wmove(ch, ch->ch_digi.digi_offstr,
				(int) ch->ch_digi.digi_offlen);
			ch->ch_flags &= ~CH_PRON;
		}
	}

	/* Detach the tty and clear the transient unit state. */
	un->un_tty = NULL;
	un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);

	/* Wake anyone blocked in open/block_til_ready on this channel. */
	wake_up_interruptible(&ch->ch_flags_wait);
	wake_up_interruptible(&un->un_flags_wait);

	spin_unlock_irqrestore(&ch->ch_lock, flags);
}
1665
1666
1667 /*
1668 * dgnc_tty_chars_in_buffer()
1669 *
1670 * Return number of characters that have not been transmitted yet.
1671 *
1672 * This routine is used by the line discipline to determine if there
1673 * is data waiting to be transmitted/drained/flushed or not.
1674 */
1675 static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)
1676 {
1677 struct channel_t *ch = NULL;
1678 struct un_t *un = NULL;
1679 ushort thead;
1680 ushort ttail;
1681 uint tmask;
1682 uint chars = 0;
1683 unsigned long flags;
1684
1685 if (tty == NULL)
1686 return 0;
1687
1688 un = tty->driver_data;
1689 if (!un || un->magic != DGNC_UNIT_MAGIC)
1690 return 0;
1691
1692 ch = un->un_ch;
1693 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
1694 return 0;
1695
1696 spin_lock_irqsave(&ch->ch_lock, flags);
1697
1698 tmask = WQUEUEMASK;
1699 thead = ch->ch_w_head & tmask;
1700 ttail = ch->ch_w_tail & tmask;
1701
1702 spin_unlock_irqrestore(&ch->ch_lock, flags);
1703
1704 if (ttail == thead) {
1705 chars = 0;
1706 } else {
1707 if (thead >= ttail)
1708 chars = thead - ttail;
1709 else
1710 chars = thead - ttail + WQUEUESIZE;
1711 }
1712
1713 return chars;
1714 }
1715
1716
1717 /*
1718 * dgnc_maxcps_room
1719 *
1720 * Reduces bytes_available to the max number of characters
1721 * that can be sent currently given the maxcps value, and
1722 * returns the new bytes_available. This only affects printer
1723 * output.
1724 */
1725 static int dgnc_maxcps_room(struct tty_struct *tty, int bytes_available)
1726 {
1727 struct channel_t *ch = NULL;
1728 struct un_t *un = NULL;
1729
1730 if (!tty)
1731 return bytes_available;
1732
1733 un = tty->driver_data;
1734 if (!un || un->magic != DGNC_UNIT_MAGIC)
1735 return bytes_available;
1736
1737 ch = un->un_ch;
1738 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
1739 return bytes_available;
1740
1741 /*
1742 * If its not the Transparent print device, return
1743 * the full data amount.
1744 */
1745 if (un->un_type != DGNC_PRINT)
1746 return bytes_available;
1747
1748 if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) {
1749 int cps_limit = 0;
1750 unsigned long current_time = jiffies;
1751 unsigned long buffer_time = current_time +
1752 (HZ * ch->ch_digi.digi_bufsize) / ch->ch_digi.digi_maxcps;
1753
1754 if (ch->ch_cpstime < current_time) {
1755 /* buffer is empty */
1756 ch->ch_cpstime = current_time; /* reset ch_cpstime */
1757 cps_limit = ch->ch_digi.digi_bufsize;
1758 } else if (ch->ch_cpstime < buffer_time) {
1759 /* still room in the buffer */
1760 cps_limit = ((buffer_time - ch->ch_cpstime) * ch->ch_digi.digi_maxcps) / HZ;
1761 } else {
1762 /* no room in the buffer */
1763 cps_limit = 0;
1764 }
1765
1766 bytes_available = min(cps_limit, bytes_available);
1767 }
1768
1769 return bytes_available;
1770 }
1771
1772
1773 /*
1774 * dgnc_tty_write_room()
1775 *
1776 * Return space available in Tx buffer
1777 */
1778 static int dgnc_tty_write_room(struct tty_struct *tty)
1779 {
1780 struct channel_t *ch = NULL;
1781 struct un_t *un = NULL;
1782 ushort head;
1783 ushort tail;
1784 ushort tmask;
1785 int ret = 0;
1786 unsigned long flags;
1787
1788 if (tty == NULL || dgnc_TmpWriteBuf == NULL)
1789 return 0;
1790
1791 un = tty->driver_data;
1792 if (!un || un->magic != DGNC_UNIT_MAGIC)
1793 return 0;
1794
1795 ch = un->un_ch;
1796 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
1797 return 0;
1798
1799 spin_lock_irqsave(&ch->ch_lock, flags);
1800
1801 tmask = WQUEUEMASK;
1802 head = (ch->ch_w_head) & tmask;
1803 tail = (ch->ch_w_tail) & tmask;
1804
1805 ret = tail - head - 1;
1806 if (ret < 0)
1807 ret += WQUEUESIZE;
1808
1809 /* Limit printer to maxcps */
1810 ret = dgnc_maxcps_room(tty, ret);
1811
1812 /*
1813 * If we are printer device, leave space for
1814 * possibly both the on and off strings.
1815 */
1816 if (un->un_type == DGNC_PRINT) {
1817 if (!(ch->ch_flags & CH_PRON))
1818 ret -= ch->ch_digi.digi_onlen;
1819 ret -= ch->ch_digi.digi_offlen;
1820 } else {
1821 if (ch->ch_flags & CH_PRON)
1822 ret -= ch->ch_digi.digi_offlen;
1823 }
1824
1825 if (ret < 0)
1826 ret = 0;
1827
1828 spin_unlock_irqrestore(&ch->ch_lock, flags);
1829
1830 return ret;
1831 }
1832
1833
/*
 * dgnc_tty_put_char()
 *
 * Queue a single character for transmit — used by the line discipline
 * for OPOST processing.
 *
 * Returns the number of characters actually queued (0 or 1).
 * Bug fix: previously this always returned 1 even when dgnc_tty_write()
 * queued nothing (full queue, invalid unit, ...), which told the tty
 * layer the character was accepted when it was silently dropped.
 */
static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c)
{
	/* Forward to the bulk write path and report its result. */
	return dgnc_tty_write(tty, &c, 1);
}
1849
1850
1851 /*
1852 * dgnc_tty_write()
1853 *
1854 * Take data from the user or kernel and send it out to the FEP.
1855 * In here exists all the Transparent Print magic as well.
1856 */
1857 static int dgnc_tty_write(struct tty_struct *tty,
1858 const unsigned char *buf, int count)
1859 {
1860 struct channel_t *ch = NULL;
1861 struct un_t *un = NULL;
1862 int bufcount = 0, n = 0;
1863 int orig_count = 0;
1864 unsigned long flags;
1865 ushort head;
1866 ushort tail;
1867 ushort tmask;
1868 uint remain;
1869 int from_user = 0;
1870
1871 if (tty == NULL || dgnc_TmpWriteBuf == NULL)
1872 return 0;
1873
1874 un = tty->driver_data;
1875 if (!un || un->magic != DGNC_UNIT_MAGIC)
1876 return 0;
1877
1878 ch = un->un_ch;
1879 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
1880 return 0;
1881
1882 if (!count)
1883 return 0;
1884
1885 /*
1886 * Store original amount of characters passed in.
1887 * This helps to figure out if we should ask the FEP
1888 * to send us an event when it has more space available.
1889 */
1890 orig_count = count;
1891
1892 spin_lock_irqsave(&ch->ch_lock, flags);
1893
1894 /* Get our space available for the channel from the board */
1895 tmask = WQUEUEMASK;
1896 head = (ch->ch_w_head) & tmask;
1897 tail = (ch->ch_w_tail) & tmask;
1898
1899 bufcount = tail - head - 1;
1900 if (bufcount < 0)
1901 bufcount += WQUEUESIZE;
1902
1903 /*
1904 * Limit printer output to maxcps overall, with bursts allowed
1905 * up to bufsize characters.
1906 */
1907 bufcount = dgnc_maxcps_room(tty, bufcount);
1908
1909 /*
1910 * Take minimum of what the user wants to send, and the
1911 * space available in the FEP buffer.
1912 */
1913 count = min(count, bufcount);
1914
1915 /*
1916 * Bail if no space left.
1917 */
1918 if (count <= 0) {
1919 spin_unlock_irqrestore(&ch->ch_lock, flags);
1920 return 0;
1921 }
1922
1923 /*
1924 * Output the printer ON string, if we are in terminal mode, but
1925 * need to be in printer mode.
1926 */
1927 if ((un->un_type == DGNC_PRINT) && !(ch->ch_flags & CH_PRON)) {
1928 dgnc_wmove(ch, ch->ch_digi.digi_onstr,
1929 (int) ch->ch_digi.digi_onlen);
1930 head = (ch->ch_w_head) & tmask;
1931 ch->ch_flags |= CH_PRON;
1932 }
1933
1934 /*
1935 * On the other hand, output the printer OFF string, if we are
1936 * currently in printer mode, but need to output to the terminal.
1937 */
1938 if ((un->un_type != DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
1939 dgnc_wmove(ch, ch->ch_digi.digi_offstr,
1940 (int) ch->ch_digi.digi_offlen);
1941 head = (ch->ch_w_head) & tmask;
1942 ch->ch_flags &= ~CH_PRON;
1943 }
1944
1945 /*
1946 * If there is nothing left to copy, or I can't handle any more data, leave.
1947 */
1948 if (count <= 0) {
1949 spin_unlock_irqrestore(&ch->ch_lock, flags);
1950 return 0;
1951 }
1952
1953 if (from_user) {
1954
1955 count = min(count, WRITEBUFLEN);
1956
1957 spin_unlock_irqrestore(&ch->ch_lock, flags);
1958
1959 /*
1960 * If data is coming from user space, copy it into a temporary
1961 * buffer so we don't get swapped out while doing the copy to
1962 * the board.
1963 */
1964 /* we're allowed to block if it's from_user */
1965 if (down_interruptible(&dgnc_TmpWriteSem))
1966 return -EINTR;
1967
1968 /*
1969 * copy_from_user() returns the number
1970 * of bytes that could *NOT* be copied.
1971 */
1972 count -= copy_from_user(dgnc_TmpWriteBuf, (const unsigned char __user *) buf, count);
1973
1974 if (!count) {
1975 up(&dgnc_TmpWriteSem);
1976 return -EFAULT;
1977 }
1978
1979 spin_lock_irqsave(&ch->ch_lock, flags);
1980
1981 buf = dgnc_TmpWriteBuf;
1982
1983 }
1984
1985 n = count;
1986
1987 /*
1988 * If the write wraps over the top of the circular buffer,
1989 * move the portion up to the wrap point, and reset the
1990 * pointers to the bottom.
1991 */
1992 remain = WQUEUESIZE - head;
1993
1994 if (n >= remain) {
1995 n -= remain;
1996 memcpy(ch->ch_wqueue + head, buf, remain);
1997 dgnc_sniff_nowait_nolock(ch, "USER WRITE", ch->ch_wqueue + head, remain);
1998 head = 0;
1999 buf += remain;
2000 }
2001
2002 if (n > 0) {
2003 /*
2004 * Move rest of data.
2005 */
2006 remain = n;
2007 memcpy(ch->ch_wqueue + head, buf, remain);
2008 dgnc_sniff_nowait_nolock(ch, "USER WRITE", ch->ch_wqueue + head, remain);
2009 head += remain;
2010 }
2011
2012 if (count) {
2013 head &= tmask;
2014 ch->ch_w_head = head;
2015 }
2016
2017 /* Update printer buffer empty time. */
2018 if ((un->un_type == DGNC_PRINT) && (ch->ch_digi.digi_maxcps > 0)
2019 && (ch->ch_digi.digi_bufsize > 0)) {
2020 ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
2021 }
2022
2023 if (from_user) {
2024 spin_unlock_irqrestore(&ch->ch_lock, flags);
2025 up(&dgnc_TmpWriteSem);
2026 } else {
2027 spin_unlock_irqrestore(&ch->ch_lock, flags);
2028 }
2029
2030 if (count) {
2031 /*
2032 * Channel lock is grabbed and then released
2033 * inside this routine.
2034 */
2035 ch->ch_bd->bd_ops->copy_data_from_queue_to_uart(ch);
2036 }
2037
2038 return count;
2039 }
2040
2041
2042 /*
2043 * Return modem signals to ld.
2044 */
2045
2046 static int dgnc_tty_tiocmget(struct tty_struct *tty)
2047 {
2048 struct channel_t *ch;
2049 struct un_t *un;
2050 int result = -EIO;
2051 unsigned char mstat = 0;
2052 unsigned long flags;
2053
2054 if (!tty || tty->magic != TTY_MAGIC)
2055 return result;
2056
2057 un = tty->driver_data;
2058 if (!un || un->magic != DGNC_UNIT_MAGIC)
2059 return result;
2060
2061 ch = un->un_ch;
2062 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2063 return result;
2064
2065 spin_lock_irqsave(&ch->ch_lock, flags);
2066
2067 mstat = (ch->ch_mostat | ch->ch_mistat);
2068
2069 spin_unlock_irqrestore(&ch->ch_lock, flags);
2070
2071 result = 0;
2072
2073 if (mstat & UART_MCR_DTR)
2074 result |= TIOCM_DTR;
2075 if (mstat & UART_MCR_RTS)
2076 result |= TIOCM_RTS;
2077 if (mstat & UART_MSR_CTS)
2078 result |= TIOCM_CTS;
2079 if (mstat & UART_MSR_DSR)
2080 result |= TIOCM_DSR;
2081 if (mstat & UART_MSR_RI)
2082 result |= TIOCM_RI;
2083 if (mstat & UART_MSR_DCD)
2084 result |= TIOCM_CD;
2085
2086 return result;
2087 }
2088
2089
2090 /*
2091 * dgnc_tty_tiocmset()
2092 *
2093 * Set modem signals, called by ld.
2094 */
2095
2096 static int dgnc_tty_tiocmset(struct tty_struct *tty,
2097 unsigned int set, unsigned int clear)
2098 {
2099 struct dgnc_board *bd;
2100 struct channel_t *ch;
2101 struct un_t *un;
2102 int ret = -EIO;
2103 unsigned long flags;
2104
2105 if (!tty || tty->magic != TTY_MAGIC)
2106 return ret;
2107
2108 un = tty->driver_data;
2109 if (!un || un->magic != DGNC_UNIT_MAGIC)
2110 return ret;
2111
2112 ch = un->un_ch;
2113 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2114 return ret;
2115
2116 bd = ch->ch_bd;
2117 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
2118 return ret;
2119
2120 spin_lock_irqsave(&ch->ch_lock, flags);
2121
2122 if (set & TIOCM_RTS)
2123 ch->ch_mostat |= UART_MCR_RTS;
2124
2125 if (set & TIOCM_DTR)
2126 ch->ch_mostat |= UART_MCR_DTR;
2127
2128 if (clear & TIOCM_RTS)
2129 ch->ch_mostat &= ~(UART_MCR_RTS);
2130
2131 if (clear & TIOCM_DTR)
2132 ch->ch_mostat &= ~(UART_MCR_DTR);
2133
2134 ch->ch_bd->bd_ops->assert_modem_signals(ch);
2135
2136 spin_unlock_irqrestore(&ch->ch_lock, flags);
2137
2138 return 0;
2139 }
2140
2141
2142 /*
2143 * dgnc_tty_send_break()
2144 *
2145 * Send a Break, called by ld.
2146 */
2147 static int dgnc_tty_send_break(struct tty_struct *tty, int msec)
2148 {
2149 struct dgnc_board *bd;
2150 struct channel_t *ch;
2151 struct un_t *un;
2152 int ret = -EIO;
2153 unsigned long flags;
2154
2155 if (!tty || tty->magic != TTY_MAGIC)
2156 return ret;
2157
2158 un = tty->driver_data;
2159 if (!un || un->magic != DGNC_UNIT_MAGIC)
2160 return ret;
2161
2162 ch = un->un_ch;
2163 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2164 return ret;
2165
2166 bd = ch->ch_bd;
2167 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
2168 return ret;
2169
2170 switch (msec) {
2171 case -1:
2172 msec = 0xFFFF;
2173 break;
2174 case 0:
2175 msec = 0;
2176 break;
2177 default:
2178 break;
2179 }
2180
2181 spin_lock_irqsave(&ch->ch_lock, flags);
2182
2183 ch->ch_bd->bd_ops->send_break(ch, msec);
2184
2185 spin_unlock_irqrestore(&ch->ch_lock, flags);
2186
2187 return 0;
2188
2189 }
2190
2191
2192 /*
2193 * dgnc_tty_wait_until_sent()
2194 *
2195 * wait until data has been transmitted, called by ld.
2196 */
2197 static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout)
2198 {
2199 struct dgnc_board *bd;
2200 struct channel_t *ch;
2201 struct un_t *un;
2202 int rc;
2203
2204 if (!tty || tty->magic != TTY_MAGIC)
2205 return;
2206
2207 un = tty->driver_data;
2208 if (!un || un->magic != DGNC_UNIT_MAGIC)
2209 return;
2210
2211 ch = un->un_ch;
2212 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2213 return;
2214
2215 bd = ch->ch_bd;
2216 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
2217 return;
2218
2219 rc = bd->bd_ops->drain(tty, 0);
2220 }
2221
2222
2223 /*
2224 * dgnc_send_xchar()
2225 *
2226 * send a high priority character, called by ld.
2227 */
2228 static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)
2229 {
2230 struct dgnc_board *bd;
2231 struct channel_t *ch;
2232 struct un_t *un;
2233 unsigned long flags;
2234
2235 if (!tty || tty->magic != TTY_MAGIC)
2236 return;
2237
2238 un = tty->driver_data;
2239 if (!un || un->magic != DGNC_UNIT_MAGIC)
2240 return;
2241
2242 ch = un->un_ch;
2243 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2244 return;
2245
2246 bd = ch->ch_bd;
2247 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
2248 return;
2249
2250 dev_dbg(tty->dev, "dgnc_tty_send_xchar start\n");
2251
2252 spin_lock_irqsave(&ch->ch_lock, flags);
2253 bd->bd_ops->send_immediate_char(ch, c);
2254 spin_unlock_irqrestore(&ch->ch_lock, flags);
2255
2256 dev_dbg(tty->dev, "dgnc_tty_send_xchar finish\n");
2257 }
2258
2259
2260
2261
2262 /*
2263 * Return modem signals to ld.
2264 */
2265 static inline int dgnc_get_mstat(struct channel_t *ch)
2266 {
2267 unsigned char mstat;
2268 int result = -EIO;
2269 unsigned long flags;
2270
2271 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2272 return -ENXIO;
2273
2274 spin_lock_irqsave(&ch->ch_lock, flags);
2275
2276 mstat = (ch->ch_mostat | ch->ch_mistat);
2277
2278 spin_unlock_irqrestore(&ch->ch_lock, flags);
2279
2280 result = 0;
2281
2282 if (mstat & UART_MCR_DTR)
2283 result |= TIOCM_DTR;
2284 if (mstat & UART_MCR_RTS)
2285 result |= TIOCM_RTS;
2286 if (mstat & UART_MSR_CTS)
2287 result |= TIOCM_CTS;
2288 if (mstat & UART_MSR_DSR)
2289 result |= TIOCM_DSR;
2290 if (mstat & UART_MSR_RI)
2291 result |= TIOCM_RI;
2292 if (mstat & UART_MSR_DCD)
2293 result |= TIOCM_CD;
2294
2295 return result;
2296 }
2297
2298
2299
2300 /*
2301 * Return modem signals to ld.
2302 */
2303 static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value)
2304 {
2305 int result;
2306
2307 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2308 return -ENXIO;
2309
2310 result = dgnc_get_mstat(ch);
2311
2312 if (result < 0)
2313 return -ENXIO;
2314
2315 return put_user(result, value);
2316 }
2317
2318
2319 /*
2320 * dgnc_set_modem_info()
2321 *
2322 * Set modem signals, called by ld.
2323 */
2324 static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value)
2325 {
2326 struct dgnc_board *bd;
2327 struct channel_t *ch;
2328 struct un_t *un;
2329 int ret = -ENXIO;
2330 unsigned int arg = 0;
2331 unsigned long flags;
2332
2333 if (!tty || tty->magic != TTY_MAGIC)
2334 return ret;
2335
2336 un = tty->driver_data;
2337 if (!un || un->magic != DGNC_UNIT_MAGIC)
2338 return ret;
2339
2340 ch = un->un_ch;
2341 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2342 return ret;
2343
2344 bd = ch->ch_bd;
2345 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
2346 return ret;
2347
2348 ret = 0;
2349
2350 ret = get_user(arg, value);
2351 if (ret)
2352 return ret;
2353
2354 switch (command) {
2355 case TIOCMBIS:
2356 if (arg & TIOCM_RTS)
2357 ch->ch_mostat |= UART_MCR_RTS;
2358
2359 if (arg & TIOCM_DTR)
2360 ch->ch_mostat |= UART_MCR_DTR;
2361
2362 break;
2363
2364 case TIOCMBIC:
2365 if (arg & TIOCM_RTS)
2366 ch->ch_mostat &= ~(UART_MCR_RTS);
2367
2368 if (arg & TIOCM_DTR)
2369 ch->ch_mostat &= ~(UART_MCR_DTR);
2370
2371 break;
2372
2373 case TIOCMSET:
2374
2375 if (arg & TIOCM_RTS)
2376 ch->ch_mostat |= UART_MCR_RTS;
2377 else
2378 ch->ch_mostat &= ~(UART_MCR_RTS);
2379
2380 if (arg & TIOCM_DTR)
2381 ch->ch_mostat |= UART_MCR_DTR;
2382 else
2383 ch->ch_mostat &= ~(UART_MCR_DTR);
2384
2385 break;
2386
2387 default:
2388 return -EINVAL;
2389 }
2390
2391 spin_lock_irqsave(&ch->ch_lock, flags);
2392
2393 ch->ch_bd->bd_ops->assert_modem_signals(ch);
2394
2395 spin_unlock_irqrestore(&ch->ch_lock, flags);
2396
2397 return 0;
2398 }
2399
2400
2401 /*
2402 * dgnc_tty_digigeta()
2403 *
2404 * Ioctl to get the information for ditty.
2405 *
2406 *
2407 *
2408 */
2409 static int dgnc_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo)
2410 {
2411 struct channel_t *ch;
2412 struct un_t *un;
2413 struct digi_t tmp;
2414 unsigned long flags;
2415
2416 if (!retinfo)
2417 return -EFAULT;
2418
2419 if (!tty || tty->magic != TTY_MAGIC)
2420 return -EFAULT;
2421
2422 un = tty->driver_data;
2423 if (!un || un->magic != DGNC_UNIT_MAGIC)
2424 return -EFAULT;
2425
2426 ch = un->un_ch;
2427 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2428 return -EFAULT;
2429
2430 memset(&tmp, 0, sizeof(tmp));
2431
2432 spin_lock_irqsave(&ch->ch_lock, flags);
2433 memcpy(&tmp, &ch->ch_digi, sizeof(tmp));
2434 spin_unlock_irqrestore(&ch->ch_lock, flags);
2435
2436 if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
2437 return -EFAULT;
2438
2439 return 0;
2440 }
2441
2442
/*
 * dgnc_tty_digiseta()
 *
 * Ioctl to set the information for ditty.
 *
 * Copies a struct digi_t from userspace, handles transitions into and
 * out of RTS/DTR toggle mode, clamps all values to sane ranges, and
 * re-runs param() so the hardware picks up any change.
 *
 * Returns 0 on success, -EFAULT on any validation/copy failure.
 */
static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info)
{
	struct dgnc_board *bd;
	struct channel_t *ch;
	struct un_t *un;
	struct digi_t new_digi;
	unsigned long flags;

	if (!tty || tty->magic != TTY_MAGIC)
		return -EFAULT;

	un = tty->driver_data;
	if (!un || un->magic != DGNC_UNIT_MAGIC)
		return -EFAULT;

	ch = un->un_ch;
	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
		return -EFAULT;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
		return -EFAULT;

	if (copy_from_user(&new_digi, new_info, sizeof(new_digi)))
		return -EFAULT;

	spin_lock_irqsave(&ch->ch_lock, flags);

	/*
	 * Handle transistions to and from RTS Toggle:
	 * entering toggle mode drops RTS, leaving it raises RTS.
	 */
	if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) && (new_digi.digi_flags & DIGI_RTS_TOGGLE))
		ch->ch_mostat &= ~(UART_MCR_RTS);
	if ((ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) && !(new_digi.digi_flags & DIGI_RTS_TOGGLE))
		ch->ch_mostat |= (UART_MCR_RTS);

	/*
	 * Handle transistions to and from DTR Toggle (same scheme as RTS).
	 */
	if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && (new_digi.digi_flags & DIGI_DTR_TOGGLE))
		ch->ch_mostat &= ~(UART_MCR_DTR);
	if ((ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && !(new_digi.digi_flags & DIGI_DTR_TOGGLE))
		ch->ch_mostat |= (UART_MCR_DTR);

	/* Install the new settings, then clamp them below. */
	memcpy(&ch->ch_digi, &new_digi, sizeof(new_digi));

	/* Clamp maxcps to [1, 10000]. */
	if (ch->ch_digi.digi_maxcps < 1)
		ch->ch_digi.digi_maxcps = 1;

	if (ch->ch_digi.digi_maxcps > 10000)
		ch->ch_digi.digi_maxcps = 10000;

	/* Emulated print buffer must hold at least 10 characters. */
	if (ch->ch_digi.digi_bufsize < 10)
		ch->ch_digi.digi_bufsize = 10;

	/* Clamp maxchar to [1, bufsize]. */
	if (ch->ch_digi.digi_maxchar < 1)
		ch->ch_digi.digi_maxchar = 1;

	if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize)
		ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize;

	/* Print on/off strings cannot exceed DIGI_PLEN. */
	if (ch->ch_digi.digi_onlen > DIGI_PLEN)
		ch->ch_digi.digi_onlen = DIGI_PLEN;

	if (ch->ch_digi.digi_offlen > DIGI_PLEN)
		ch->ch_digi.digi_offlen = DIGI_PLEN;

	/* NOTE(review): param() is invoked with ch_lock held — confirm it never sleeps. */
	ch->ch_bd->bd_ops->param(tty);

	spin_unlock_irqrestore(&ch->ch_lock, flags);

	return 0;
}
2524
2525
2526 /*
2527 * dgnc_set_termios()
2528 */
2529 static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
2530 {
2531 struct dgnc_board *bd;
2532 struct channel_t *ch;
2533 struct un_t *un;
2534 unsigned long flags;
2535
2536 if (!tty || tty->magic != TTY_MAGIC)
2537 return;
2538
2539 un = tty->driver_data;
2540 if (!un || un->magic != DGNC_UNIT_MAGIC)
2541 return;
2542
2543 ch = un->un_ch;
2544 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2545 return;
2546
2547 bd = ch->ch_bd;
2548 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
2549 return;
2550
2551 spin_lock_irqsave(&ch->ch_lock, flags);
2552
2553 ch->ch_c_cflag = tty->termios.c_cflag;
2554 ch->ch_c_iflag = tty->termios.c_iflag;
2555 ch->ch_c_oflag = tty->termios.c_oflag;
2556 ch->ch_c_lflag = tty->termios.c_lflag;
2557 ch->ch_startc = tty->termios.c_cc[VSTART];
2558 ch->ch_stopc = tty->termios.c_cc[VSTOP];
2559
2560 ch->ch_bd->bd_ops->param(tty);
2561 dgnc_carrier(ch);
2562
2563 spin_unlock_irqrestore(&ch->ch_lock, flags);
2564 }
2565
2566
2567 static void dgnc_tty_throttle(struct tty_struct *tty)
2568 {
2569 struct channel_t *ch;
2570 struct un_t *un;
2571 unsigned long flags;
2572
2573 if (!tty || tty->magic != TTY_MAGIC)
2574 return;
2575
2576 un = tty->driver_data;
2577 if (!un || un->magic != DGNC_UNIT_MAGIC)
2578 return;
2579
2580 ch = un->un_ch;
2581 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2582 return;
2583
2584 spin_lock_irqsave(&ch->ch_lock, flags);
2585
2586 ch->ch_flags |= (CH_FORCED_STOPI);
2587
2588 spin_unlock_irqrestore(&ch->ch_lock, flags);
2589 }
2590
2591
2592 static void dgnc_tty_unthrottle(struct tty_struct *tty)
2593 {
2594 struct channel_t *ch;
2595 struct un_t *un;
2596 unsigned long flags;
2597
2598 if (!tty || tty->magic != TTY_MAGIC)
2599 return;
2600
2601 un = tty->driver_data;
2602 if (!un || un->magic != DGNC_UNIT_MAGIC)
2603 return;
2604
2605 ch = un->un_ch;
2606 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2607 return;
2608
2609 spin_lock_irqsave(&ch->ch_lock, flags);
2610
2611 ch->ch_flags &= ~(CH_FORCED_STOPI);
2612
2613 spin_unlock_irqrestore(&ch->ch_lock, flags);
2614 }
2615
2616
2617 static void dgnc_tty_start(struct tty_struct *tty)
2618 {
2619 struct dgnc_board *bd;
2620 struct channel_t *ch;
2621 struct un_t *un;
2622 unsigned long flags;
2623
2624 if (!tty || tty->magic != TTY_MAGIC)
2625 return;
2626
2627 un = tty->driver_data;
2628 if (!un || un->magic != DGNC_UNIT_MAGIC)
2629 return;
2630
2631 ch = un->un_ch;
2632 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2633 return;
2634
2635 bd = ch->ch_bd;
2636 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
2637 return;
2638
2639 spin_lock_irqsave(&ch->ch_lock, flags);
2640
2641 ch->ch_flags &= ~(CH_FORCED_STOP);
2642
2643 spin_unlock_irqrestore(&ch->ch_lock, flags);
2644 }
2645
2646
2647 static void dgnc_tty_stop(struct tty_struct *tty)
2648 {
2649 struct dgnc_board *bd;
2650 struct channel_t *ch;
2651 struct un_t *un;
2652 unsigned long flags;
2653
2654 if (!tty || tty->magic != TTY_MAGIC)
2655 return;
2656
2657 un = tty->driver_data;
2658 if (!un || un->magic != DGNC_UNIT_MAGIC)
2659 return;
2660
2661 ch = un->un_ch;
2662 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2663 return;
2664
2665 bd = ch->ch_bd;
2666 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
2667 return;
2668
2669 spin_lock_irqsave(&ch->ch_lock, flags);
2670
2671 ch->ch_flags |= (CH_FORCED_STOP);
2672
2673 spin_unlock_irqrestore(&ch->ch_lock, flags);
2674 }
2675
2676
2677 /*
2678 * dgnc_tty_flush_chars()
2679 *
2680 * Flush the cook buffer
2681 *
2682 * Note to self, and any other poor souls who venture here:
2683 *
2684 * flush in this case DOES NOT mean dispose of the data.
2685 * instead, it means "stop buffering and send it if you
2686 * haven't already." Just guess how I figured that out... SRW 2-Jun-98
2687 *
2688 * It is also always called in interrupt context - JAR 8-Sept-99
2689 */
2690 static void dgnc_tty_flush_chars(struct tty_struct *tty)
2691 {
2692 struct dgnc_board *bd;
2693 struct channel_t *ch;
2694 struct un_t *un;
2695 unsigned long flags;
2696
2697 if (!tty || tty->magic != TTY_MAGIC)
2698 return;
2699
2700 un = tty->driver_data;
2701 if (!un || un->magic != DGNC_UNIT_MAGIC)
2702 return;
2703
2704 ch = un->un_ch;
2705 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2706 return;
2707
2708 bd = ch->ch_bd;
2709 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
2710 return;
2711
2712 spin_lock_irqsave(&ch->ch_lock, flags);
2713
2714 /* Do something maybe here */
2715
2716 spin_unlock_irqrestore(&ch->ch_lock, flags);
2717 }
2718
2719
2720
2721 /*
2722 * dgnc_tty_flush_buffer()
2723 *
2724 * Flush Tx buffer (make in == out)
2725 */
2726 static void dgnc_tty_flush_buffer(struct tty_struct *tty)
2727 {
2728 struct channel_t *ch;
2729 struct un_t *un;
2730 unsigned long flags;
2731
2732 if (!tty || tty->magic != TTY_MAGIC)
2733 return;
2734
2735 un = tty->driver_data;
2736 if (!un || un->magic != DGNC_UNIT_MAGIC)
2737 return;
2738
2739 ch = un->un_ch;
2740 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2741 return;
2742
2743 spin_lock_irqsave(&ch->ch_lock, flags);
2744
2745 ch->ch_flags &= ~CH_STOP;
2746
2747 /* Flush our write queue */
2748 ch->ch_w_head = ch->ch_w_tail;
2749
2750 /* Flush UARTs transmit FIFO */
2751 ch->ch_bd->bd_ops->flush_uart_write(ch);
2752
2753 if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
2754 ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
2755 wake_up_interruptible(&ch->ch_tun.un_flags_wait);
2756 }
2757 if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
2758 ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
2759 wake_up_interruptible(&ch->ch_pun.un_flags_wait);
2760 }
2761
2762 spin_unlock_irqrestore(&ch->ch_lock, flags);
2763 }
2764
2765
2766
2767 /*****************************************************************************
2768 *
2769 * The IOCTL function and all of its helpers
2770 *
2771 *****************************************************************************/
2772
2773 /*
2774 * dgnc_tty_ioctl()
2775 *
2776 * The usual assortment of ioctl's
2777 */
2778 static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
2779 unsigned long arg)
2780 {
2781 struct dgnc_board *bd;
2782 struct channel_t *ch;
2783 struct un_t *un;
2784 int rc;
2785 unsigned long flags;
2786 void __user *uarg = (void __user *) arg;
2787
2788 if (!tty || tty->magic != TTY_MAGIC)
2789 return -ENODEV;
2790
2791 un = tty->driver_data;
2792 if (!un || un->magic != DGNC_UNIT_MAGIC)
2793 return -ENODEV;
2794
2795 ch = un->un_ch;
2796 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
2797 return -ENODEV;
2798
2799 bd = ch->ch_bd;
2800 if (!bd || bd->magic != DGNC_BOARD_MAGIC)
2801 return -ENODEV;
2802
2803 spin_lock_irqsave(&ch->ch_lock, flags);
2804
2805 if (un->un_open_count <= 0) {
2806 spin_unlock_irqrestore(&ch->ch_lock, flags);
2807 return -EIO;
2808 }
2809
2810 switch (cmd) {
2811
2812 /* Here are all the standard ioctl's that we MUST implement */
2813
2814 case TCSBRK:
2815 /*
2816 * TCSBRK is SVID version: non-zero arg --> no break
2817 * this behaviour is exploited by tcdrain().
2818 *
2819 * According to POSIX.1 spec (7.2.2.1.2) breaks should be
2820 * between 0.25 and 0.5 seconds so we'll ask for something
2821 * in the middle: 0.375 seconds.
2822 */
2823 rc = tty_check_change(tty);
2824 spin_unlock_irqrestore(&ch->ch_lock, flags);
2825 if (rc)
2826 return rc;
2827
2828 rc = ch->ch_bd->bd_ops->drain(tty, 0);
2829
2830 if (rc)
2831 return -EINTR;
2832
2833 spin_lock_irqsave(&ch->ch_lock, flags);
2834
2835 if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP))
2836 ch->ch_bd->bd_ops->send_break(ch, 250);
2837
2838 spin_unlock_irqrestore(&ch->ch_lock, flags);
2839
2840 return 0;
2841
2842
2843 case TCSBRKP:
2844 /* support for POSIX tcsendbreak()
2845 * According to POSIX.1 spec (7.2.2.1.2) breaks should be
2846 * between 0.25 and 0.5 seconds so we'll ask for something
2847 * in the middle: 0.375 seconds.
2848 */
2849 rc = tty_check_change(tty);
2850 spin_unlock_irqrestore(&ch->ch_lock, flags);
2851 if (rc)
2852 return rc;
2853
2854 rc = ch->ch_bd->bd_ops->drain(tty, 0);
2855 if (rc)
2856 return -EINTR;
2857
2858 spin_lock_irqsave(&ch->ch_lock, flags);
2859
2860 ch->ch_bd->bd_ops->send_break(ch, 250);
2861
2862 spin_unlock_irqrestore(&ch->ch_lock, flags);
2863
2864 return 0;
2865
2866 case TIOCSBRK:
2867 rc = tty_check_change(tty);
2868 spin_unlock_irqrestore(&ch->ch_lock, flags);
2869 if (rc)
2870 return rc;
2871
2872 rc = ch->ch_bd->bd_ops->drain(tty, 0);
2873 if (rc)
2874 return -EINTR;
2875
2876 spin_lock_irqsave(&ch->ch_lock, flags);
2877
2878 ch->ch_bd->bd_ops->send_break(ch, 250);
2879
2880 spin_unlock_irqrestore(&ch->ch_lock, flags);
2881
2882 return 0;
2883
2884 case TIOCCBRK:
2885 /* Do Nothing */
2886 spin_unlock_irqrestore(&ch->ch_lock, flags);
2887 return 0;
2888
2889 case TIOCGSOFTCAR:
2890
2891 spin_unlock_irqrestore(&ch->ch_lock, flags);
2892
2893 rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) arg);
2894 return rc;
2895
2896 case TIOCSSOFTCAR:
2897
2898 spin_unlock_irqrestore(&ch->ch_lock, flags);
2899 rc = get_user(arg, (unsigned long __user *) arg);
2900 if (rc)
2901 return rc;
2902
2903 spin_lock_irqsave(&ch->ch_lock, flags);
2904 tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
2905 ch->ch_bd->bd_ops->param(tty);
2906 spin_unlock_irqrestore(&ch->ch_lock, flags);
2907
2908 return 0;
2909
2910 case TIOCMGET:
2911 spin_unlock_irqrestore(&ch->ch_lock, flags);
2912 return dgnc_get_modem_info(ch, uarg);
2913
2914 case TIOCMBIS:
2915 case TIOCMBIC:
2916 case TIOCMSET:
2917 spin_unlock_irqrestore(&ch->ch_lock, flags);
2918 return dgnc_set_modem_info(tty, cmd, uarg);
2919
2920 /*
2921 * Here are any additional ioctl's that we want to implement
2922 */
2923
2924 case TCFLSH:
2925 /*
2926 * The linux tty driver doesn't have a flush
2927 * input routine for the driver, assuming all backed
2928 * up data is in the line disc. buffers. However,
2929 * we all know that's not the case. Here, we
2930 * act on the ioctl, but then lie and say we didn't
2931 * so the line discipline will process the flush
2932 * also.
2933 */
2934 rc = tty_check_change(tty);
2935 if (rc) {
2936 spin_unlock_irqrestore(&ch->ch_lock, flags);
2937 return rc;
2938 }
2939
2940 if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
2941 ch->ch_r_head = ch->ch_r_tail;
2942 ch->ch_bd->bd_ops->flush_uart_read(ch);
2943 /* Force queue flow control to be released, if needed */
2944 dgnc_check_queue_flow_control(ch);
2945 }
2946
2947 if ((arg == TCOFLUSH) || (arg == TCIOFLUSH)) {
2948 if (!(un->un_type == DGNC_PRINT)) {
2949 ch->ch_w_head = ch->ch_w_tail;
2950 ch->ch_bd->bd_ops->flush_uart_write(ch);
2951
2952 if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
2953 ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
2954 wake_up_interruptible(&ch->ch_tun.un_flags_wait);
2955 }
2956
2957 if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
2958 ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
2959 wake_up_interruptible(&ch->ch_pun.un_flags_wait);
2960 }
2961
2962 }
2963 }
2964
2965 /* pretend we didn't recognize this IOCTL */
2966 spin_unlock_irqrestore(&ch->ch_lock, flags);
2967 return -ENOIOCTLCMD;
2968 case TCSETSF:
2969 case TCSETSW:
2970 /*
2971 * The linux tty driver doesn't have a flush
2972 * input routine for the driver, assuming all backed
2973 * up data is in the line disc. buffers. However,
2974 * we all know that's not the case. Here, we
2975 * act on the ioctl, but then lie and say we didn't
2976 * so the line discipline will process the flush
2977 * also.
2978 */
2979 if (cmd == TCSETSF) {
2980 /* flush rx */
2981 ch->ch_flags &= ~CH_STOP;
2982 ch->ch_r_head = ch->ch_r_tail;
2983 ch->ch_bd->bd_ops->flush_uart_read(ch);
2984 /* Force queue flow control to be released, if needed */
2985 dgnc_check_queue_flow_control(ch);
2986 }
2987
2988 /* now wait for all the output to drain */
2989 spin_unlock_irqrestore(&ch->ch_lock, flags);
2990 rc = ch->ch_bd->bd_ops->drain(tty, 0);
2991 if (rc)
2992 return -EINTR;
2993
2994 /* pretend we didn't recognize this */
2995 return -ENOIOCTLCMD;
2996
2997 case TCSETAW:
2998
2999 spin_unlock_irqrestore(&ch->ch_lock, flags);
3000 rc = ch->ch_bd->bd_ops->drain(tty, 0);
3001 if (rc)
3002 return -EINTR;
3003
3004 /* pretend we didn't recognize this */
3005 return -ENOIOCTLCMD;
3006
3007 case TCXONC:
3008 spin_unlock_irqrestore(&ch->ch_lock, flags);
3009 /* Make the ld do it */
3010 return -ENOIOCTLCMD;
3011
3012 case DIGI_GETA:
3013 /* get information for ditty */
3014 spin_unlock_irqrestore(&ch->ch_lock, flags);
3015 return dgnc_tty_digigeta(tty, uarg);
3016
3017 case DIGI_SETAW:
3018 case DIGI_SETAF:
3019
3020 /* set information for ditty */
3021 if (cmd == (DIGI_SETAW)) {
3022
3023 spin_unlock_irqrestore(&ch->ch_lock, flags);
3024 rc = ch->ch_bd->bd_ops->drain(tty, 0);
3025
3026 if (rc)
3027 return -EINTR;
3028
3029 spin_lock_irqsave(&ch->ch_lock, flags);
3030 } else {
3031 tty_ldisc_flush(tty);
3032 }
3033 /* fall thru */
3034
3035 case DIGI_SETA:
3036 spin_unlock_irqrestore(&ch->ch_lock, flags);
3037 return dgnc_tty_digiseta(tty, uarg);
3038
3039 case DIGI_LOOPBACK:
3040 {
3041 uint loopback = 0;
3042 /* Let go of locks when accessing user space, could sleep */
3043 spin_unlock_irqrestore(&ch->ch_lock, flags);
3044 rc = get_user(loopback, (unsigned int __user *) arg);
3045 if (rc)
3046 return rc;
3047 spin_lock_irqsave(&ch->ch_lock, flags);
3048
3049 /* Enable/disable internal loopback for this port */
3050 if (loopback)
3051 ch->ch_flags |= CH_LOOPBACK;
3052 else
3053 ch->ch_flags &= ~(CH_LOOPBACK);
3054
3055 ch->ch_bd->bd_ops->param(tty);
3056 spin_unlock_irqrestore(&ch->ch_lock, flags);
3057 return 0;
3058 }
3059
3060 case DIGI_GETCUSTOMBAUD:
3061 spin_unlock_irqrestore(&ch->ch_lock, flags);
3062 rc = put_user(ch->ch_custom_speed, (unsigned int __user *) arg);
3063 return rc;
3064
3065 case DIGI_SETCUSTOMBAUD:
3066 {
3067 int new_rate;
3068 /* Let go of locks when accessing user space, could sleep */
3069 spin_unlock_irqrestore(&ch->ch_lock, flags);
3070 rc = get_user(new_rate, (int __user *) arg);
3071 if (rc)
3072 return rc;
3073 spin_lock_irqsave(&ch->ch_lock, flags);
3074 dgnc_set_custom_speed(ch, new_rate);
3075 ch->ch_bd->bd_ops->param(tty);
3076 spin_unlock_irqrestore(&ch->ch_lock, flags);
3077 return 0;
3078 }
3079
3080 /*
3081 * This ioctl allows insertion of a character into the front
3082 * of any pending data to be transmitted.
3083 *
3084 * This ioctl is to satify the "Send Character Immediate"
3085 * call that the RealPort protocol spec requires.
3086 */
3087 case DIGI_REALPORT_SENDIMMEDIATE:
3088 {
3089 unsigned char c;
3090
3091 spin_unlock_irqrestore(&ch->ch_lock, flags);
3092 rc = get_user(c, (unsigned char __user *) arg);
3093 if (rc)
3094 return rc;
3095 spin_lock_irqsave(&ch->ch_lock, flags);
3096 ch->ch_bd->bd_ops->send_immediate_char(ch, c);
3097 spin_unlock_irqrestore(&ch->ch_lock, flags);
3098 return 0;
3099 }
3100
3101 /*
3102 * This ioctl returns all the current counts for the port.
3103 *
3104 * This ioctl is to satify the "Line Error Counters"
3105 * call that the RealPort protocol spec requires.
3106 */
3107 case DIGI_REALPORT_GETCOUNTERS:
3108 {
3109 struct digi_getcounter buf;
3110
3111 buf.norun = ch->ch_err_overrun;
3112 buf.noflow = 0; /* The driver doesn't keep this stat */
3113 buf.nframe = ch->ch_err_frame;
3114 buf.nparity = ch->ch_err_parity;
3115 buf.nbreak = ch->ch_err_break;
3116 buf.rbytes = ch->ch_rxcount;
3117 buf.tbytes = ch->ch_txcount;
3118
3119 spin_unlock_irqrestore(&ch->ch_lock, flags);
3120
3121 if (copy_to_user(uarg, &buf, sizeof(buf)))
3122 return -EFAULT;
3123
3124 return 0;
3125 }
3126
3127 /*
3128 * This ioctl returns all current events.
3129 *
3130 * This ioctl is to satify the "Event Reporting"
3131 * call that the RealPort protocol spec requires.
3132 */
3133 case DIGI_REALPORT_GETEVENTS:
3134 {
3135 unsigned int events = 0;
3136
3137 /* NOTE: MORE EVENTS NEEDS TO BE ADDED HERE */
3138 if (ch->ch_flags & CH_BREAK_SENDING)
3139 events |= EV_TXB;
3140 if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_FORCED_STOP))
3141 events |= (EV_OPU | EV_OPS);
3142
3143 if ((ch->ch_flags & CH_STOPI) || (ch->ch_flags & CH_FORCED_STOPI))
3144 events |= (EV_IPU | EV_IPS);
3145
3146 spin_unlock_irqrestore(&ch->ch_lock, flags);
3147 rc = put_user(events, (unsigned int __user *) arg);
3148 return rc;
3149 }
3150
3151 /*
3152 * This ioctl returns TOUT and TIN counters based
3153 * upon the values passed in by the RealPort Server.
3154 * It also passes back whether the UART Transmitter is
3155 * empty as well.
3156 */
3157 case DIGI_REALPORT_GETBUFFERS:
3158 {
3159 struct digi_getbuffer buf;
3160 int tdist;
3161 int count;
3162
3163 spin_unlock_irqrestore(&ch->ch_lock, flags);
3164
3165 /*
3166 * Get data from user first.
3167 */
3168 if (copy_from_user(&buf, uarg, sizeof(buf)))
3169 return -EFAULT;
3170
3171 spin_lock_irqsave(&ch->ch_lock, flags);
3172
3173 /*
3174 * Figure out how much data is in our RX and TX queues.
3175 */
3176 buf.rxbuf = (ch->ch_r_head - ch->ch_r_tail) & RQUEUEMASK;
3177 buf.txbuf = (ch->ch_w_head - ch->ch_w_tail) & WQUEUEMASK;
3178
3179 /*
3180 * Is the UART empty? Add that value to whats in our TX queue.
3181 */
3182 count = buf.txbuf + ch->ch_bd->bd_ops->get_uart_bytes_left(ch);
3183
3184 /*
3185 * Figure out how much data the RealPort Server believes should
3186 * be in our TX queue.
3187 */
3188 tdist = (buf.tIn - buf.tOut) & 0xffff;
3189
3190 /*
3191 * If we have more data than the RealPort Server believes we
3192 * should have, reduce our count to its amount.
3193 *
3194 * This count difference CAN happen because the Linux LD can
3195 * insert more characters into our queue for OPOST processing
3196 * that the RealPort Server doesn't know about.
3197 */
3198 if (buf.txbuf > tdist)
3199 buf.txbuf = tdist;
3200
3201 /*
3202 * Report whether our queue and UART TX are completely empty.
3203 */
3204 if (count)
3205 buf.txdone = 0;
3206 else
3207 buf.txdone = 1;
3208
3209 spin_unlock_irqrestore(&ch->ch_lock, flags);
3210
3211 if (copy_to_user(uarg, &buf, sizeof(buf)))
3212 return -EFAULT;
3213
3214 return 0;
3215 }
3216 default:
3217 spin_unlock_irqrestore(&ch->ch_lock, flags);
3218
3219 return -ENOIOCTLCMD;
3220 }
3221 }
3222
3223
3224
3225
3226
3227 /* LDV_COMMENT_BEGIN_MAIN */
3228 #ifdef LDV_MAIN4_sequence_infinite_withcheck_stateful
3229
3230 /*###########################################################################*/
3231
3232 /*############## Driver Environment Generator 0.2 output ####################*/
3233
3234 /*###########################################################################*/
3235
3236
3237
3238 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. */
3239 void ldv_check_final_state(void);
3240
3241 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */
3242 void ldv_check_return_value(int res);
3243
3244 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */
3245 void ldv_check_return_value_probe(int res);
3246
3247 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */
3248 void ldv_initialize(void);
3249
3250 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */
3251 void ldv_handler_precall(void);
3252
3253 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */
3254 int nondet_int(void);
3255
3256 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */
3257 int LDV_IN_INTERRUPT;
3258
3259 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. */
3260 void ldv_main4_sequence_infinite_withcheck_stateful(void) {
3261
3262
3263
3264 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */
3265 /*============================= VARIABLE DECLARATION PART =============================*/
3266 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3267 /* content: static int dgnc_tty_open(struct tty_struct *tty, struct file *file)*/
3268 /* LDV_COMMENT_BEGIN_PREP */
3269 #define init_MUTEX(sem) sema_init(sem, 1)
3270 #define DECLARE_MUTEX(name) \
3271 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3272 #define TMPBUFLEN (1024)
3273 #ifdef TTY_DONT_FLIP
3274 #endif
3275 /* LDV_COMMENT_END_PREP */
3276 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_tty_open" */
3277 struct tty_struct * var_group1;
3278 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_tty_open" */
3279 struct file * var_group2;
3280 /* LDV_COMMENT_VAR_DECLARE Variable declaration for test return result from function call "dgnc_tty_open" */
3281 static int res_dgnc_tty_open_12;
3282 /* content: static void dgnc_tty_close(struct tty_struct *tty, struct file *file)*/
3283 /* LDV_COMMENT_BEGIN_PREP */
3284 #define init_MUTEX(sem) sema_init(sem, 1)
3285 #define DECLARE_MUTEX(name) \
3286 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3287 #define TMPBUFLEN (1024)
3288 #ifdef TTY_DONT_FLIP
3289 #endif
3290 /* LDV_COMMENT_END_PREP */
3291 /* content: static int dgnc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)*/
3292 /* LDV_COMMENT_BEGIN_PREP */
3293 #define init_MUTEX(sem) sema_init(sem, 1)
3294 #define DECLARE_MUTEX(name) \
3295 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3296 #define TMPBUFLEN (1024)
3297 #ifdef TTY_DONT_FLIP
3298 #endif
3299 /* LDV_COMMENT_END_PREP */
3300 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_tty_write" */
3301 const unsigned char * var_dgnc_tty_write_20_p1;
3302 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_tty_write" */
3303 int var_dgnc_tty_write_20_p2;
3304 /* content: static int dgnc_tty_write_room(struct tty_struct *tty)*/
3305 /* LDV_COMMENT_BEGIN_PREP */
3306 #define init_MUTEX(sem) sema_init(sem, 1)
3307 #define DECLARE_MUTEX(name) \
3308 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3309 #define TMPBUFLEN (1024)
3310 #ifdef TTY_DONT_FLIP
3311 #endif
3312 /* LDV_COMMENT_END_PREP */
3313 /* content: static void dgnc_tty_flush_buffer(struct tty_struct *tty)*/
3314 /* LDV_COMMENT_BEGIN_PREP */
3315 #define init_MUTEX(sem) sema_init(sem, 1)
3316 #define DECLARE_MUTEX(name) \
3317 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3318 #define TMPBUFLEN (1024)
3319 #ifdef TTY_DONT_FLIP
3320 #endif
3321 /* LDV_COMMENT_END_PREP */
3322 /* content: static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)*/
3323 /* LDV_COMMENT_BEGIN_PREP */
3324 #define init_MUTEX(sem) sema_init(sem, 1)
3325 #define DECLARE_MUTEX(name) \
3326 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3327 #define TMPBUFLEN (1024)
3328 #ifdef TTY_DONT_FLIP
3329 #endif
3330 /* LDV_COMMENT_END_PREP */
3331 /* content: static void dgnc_tty_flush_chars(struct tty_struct *tty)*/
3332 /* LDV_COMMENT_BEGIN_PREP */
3333 #define init_MUTEX(sem) sema_init(sem, 1)
3334 #define DECLARE_MUTEX(name) \
3335 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3336 #define TMPBUFLEN (1024)
3337 #ifdef TTY_DONT_FLIP
3338 #endif
3339 /* LDV_COMMENT_END_PREP */
3340 /* content: static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)*/
3341 /* LDV_COMMENT_BEGIN_PREP */
3342 #define init_MUTEX(sem) sema_init(sem, 1)
3343 #define DECLARE_MUTEX(name) \
3344 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3345 #define TMPBUFLEN (1024)
3346 #ifdef TTY_DONT_FLIP
3347 #endif
3348 /* LDV_COMMENT_END_PREP */
3349 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_tty_ioctl" */
3350 unsigned int var_dgnc_tty_ioctl_38_p1;
3351 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_tty_ioctl" */
3352 unsigned long var_dgnc_tty_ioctl_38_p2;
3353 /* content: static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)*/
3354 /* LDV_COMMENT_BEGIN_PREP */
3355 #define init_MUTEX(sem) sema_init(sem, 1)
3356 #define DECLARE_MUTEX(name) \
3357 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3358 #define TMPBUFLEN (1024)
3359 #ifdef TTY_DONT_FLIP
3360 #endif
3361 /* LDV_COMMENT_END_PREP */
3362 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_tty_set_termios" */
3363 struct ktermios * var_group3;
3364 /* content: static void dgnc_tty_stop(struct tty_struct *tty)*/
3365 /* LDV_COMMENT_BEGIN_PREP */
3366 #define init_MUTEX(sem) sema_init(sem, 1)
3367 #define DECLARE_MUTEX(name) \
3368 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3369 #define TMPBUFLEN (1024)
3370 #ifdef TTY_DONT_FLIP
3371 #endif
3372 /* LDV_COMMENT_END_PREP */
3373 /* content: static void dgnc_tty_start(struct tty_struct *tty)*/
3374 /* LDV_COMMENT_BEGIN_PREP */
3375 #define init_MUTEX(sem) sema_init(sem, 1)
3376 #define DECLARE_MUTEX(name) \
3377 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3378 #define TMPBUFLEN (1024)
3379 #ifdef TTY_DONT_FLIP
3380 #endif
3381 /* LDV_COMMENT_END_PREP */
3382 /* content: static void dgnc_tty_throttle(struct tty_struct *tty)*/
3383 /* LDV_COMMENT_BEGIN_PREP */
3384 #define init_MUTEX(sem) sema_init(sem, 1)
3385 #define DECLARE_MUTEX(name) \
3386 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3387 #define TMPBUFLEN (1024)
3388 #ifdef TTY_DONT_FLIP
3389 #endif
3390 /* LDV_COMMENT_END_PREP */
3391 /* content: static void dgnc_tty_unthrottle(struct tty_struct *tty)*/
3392 /* LDV_COMMENT_BEGIN_PREP */
3393 #define init_MUTEX(sem) sema_init(sem, 1)
3394 #define DECLARE_MUTEX(name) \
3395 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3396 #define TMPBUFLEN (1024)
3397 #ifdef TTY_DONT_FLIP
3398 #endif
3399 /* LDV_COMMENT_END_PREP */
3400 /* content: static void dgnc_tty_hangup(struct tty_struct *tty)*/
3401 /* LDV_COMMENT_BEGIN_PREP */
3402 #define init_MUTEX(sem) sema_init(sem, 1)
3403 #define DECLARE_MUTEX(name) \
3404 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3405 #define TMPBUFLEN (1024)
3406 #ifdef TTY_DONT_FLIP
3407 #endif
3408 /* LDV_COMMENT_END_PREP */
3409 /* content: static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c)*/
3410 /* LDV_COMMENT_BEGIN_PREP */
3411 #define init_MUTEX(sem) sema_init(sem, 1)
3412 #define DECLARE_MUTEX(name) \
3413 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3414 #define TMPBUFLEN (1024)
3415 #ifdef TTY_DONT_FLIP
3416 #endif
3417 /* LDV_COMMENT_END_PREP */
3418 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_tty_put_char" */
3419 unsigned char var_dgnc_tty_put_char_19_p1;
3420 /* content: static int dgnc_tty_tiocmget(struct tty_struct *tty)*/
3421 /* LDV_COMMENT_BEGIN_PREP */
3422 #define init_MUTEX(sem) sema_init(sem, 1)
3423 #define DECLARE_MUTEX(name) \
3424 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3425 #define TMPBUFLEN (1024)
3426 #ifdef TTY_DONT_FLIP
3427 #endif
3428 /* LDV_COMMENT_END_PREP */
3429 /* content: static int dgnc_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)*/
3430 /* LDV_COMMENT_BEGIN_PREP */
3431 #define init_MUTEX(sem) sema_init(sem, 1)
3432 #define DECLARE_MUTEX(name) \
3433 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3434 #define TMPBUFLEN (1024)
3435 #ifdef TTY_DONT_FLIP
3436 #endif
3437 /* LDV_COMMENT_END_PREP */
3438 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_tty_tiocmset" */
3439 unsigned int var_dgnc_tty_tiocmset_22_p1;
3440 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_tty_tiocmset" */
3441 unsigned int var_dgnc_tty_tiocmset_22_p2;
3442 /* content: static int dgnc_tty_send_break(struct tty_struct *tty, int msec)*/
3443 /* LDV_COMMENT_BEGIN_PREP */
3444 #define init_MUTEX(sem) sema_init(sem, 1)
3445 #define DECLARE_MUTEX(name) \
3446 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3447 #define TMPBUFLEN (1024)
3448 #ifdef TTY_DONT_FLIP
3449 #endif
3450 /* LDV_COMMENT_END_PREP */
3451 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_tty_send_break" */
3452 int var_dgnc_tty_send_break_23_p1;
3453 /* content: static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout)*/
3454 /* LDV_COMMENT_BEGIN_PREP */
3455 #define init_MUTEX(sem) sema_init(sem, 1)
3456 #define DECLARE_MUTEX(name) \
3457 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3458 #define TMPBUFLEN (1024)
3459 #ifdef TTY_DONT_FLIP
3460 #endif
3461 /* LDV_COMMENT_END_PREP */
3462 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_tty_wait_until_sent" */
3463 int var_dgnc_tty_wait_until_sent_24_p1;
3464 /* content: static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)*/
3465 /* LDV_COMMENT_BEGIN_PREP */
3466 #define init_MUTEX(sem) sema_init(sem, 1)
3467 #define DECLARE_MUTEX(name) \
3468 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3469 #define TMPBUFLEN (1024)
3470 #ifdef TTY_DONT_FLIP
3471 #endif
3472 /* LDV_COMMENT_END_PREP */
3473 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "dgnc_tty_send_xchar" */
3474 char var_dgnc_tty_send_xchar_25_p1;
3475
3476
3477
3478
3479 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */
3480 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */
3481 /*============================= VARIABLE INITIALIZING PART =============================*/
3482 LDV_IN_INTERRUPT=1;
3483
3484
3485
3486
3487 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */
3488 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */
3489 /*============================= FUNCTION CALL SECTION =============================*/
3490 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */
3491 ldv_initialize();
3492 int ldv_s_dgnc_tty_ops_tty_operations = 0;
3493
3494
3495
3496 while( nondet_int()
3497 || !(ldv_s_dgnc_tty_ops_tty_operations == 0)
3498 ) {
3499
3500 switch(nondet_int()) {
3501
3502 case 0: {
3503
3504 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3505 if(ldv_s_dgnc_tty_ops_tty_operations==0) {
3506
3507 /* content: static int dgnc_tty_open(struct tty_struct *tty, struct file *file)*/
3508 /* LDV_COMMENT_BEGIN_PREP */
3509 #define init_MUTEX(sem) sema_init(sem, 1)
3510 #define DECLARE_MUTEX(name) \
3511 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3512 #define TMPBUFLEN (1024)
3513 #ifdef TTY_DONT_FLIP
3514 #endif
3515 /* LDV_COMMENT_END_PREP */
3516 /* LDV_COMMENT_FUNCTION_CALL Function from field "open" from driver structure with callbacks "dgnc_tty_ops". Standart function test for correct return result. */
3517 ldv_handler_precall();
3518 res_dgnc_tty_open_12 = dgnc_tty_open( var_group1, var_group2);
3519 ldv_check_return_value(res_dgnc_tty_open_12);
3520 if(res_dgnc_tty_open_12)
3521 goto ldv_module_exit;
3522 ldv_s_dgnc_tty_ops_tty_operations++;
3523
3524 }
3525
3526 }
3527
3528 break;
3529 case 1: {
3530
3531 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3532 if(ldv_s_dgnc_tty_ops_tty_operations==1) {
3533
3534 /* content: static void dgnc_tty_close(struct tty_struct *tty, struct file *file)*/
3535 /* LDV_COMMENT_BEGIN_PREP */
3536 #define init_MUTEX(sem) sema_init(sem, 1)
3537 #define DECLARE_MUTEX(name) \
3538 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3539 #define TMPBUFLEN (1024)
3540 #ifdef TTY_DONT_FLIP
3541 #endif
3542 /* LDV_COMMENT_END_PREP */
3543 /* LDV_COMMENT_FUNCTION_CALL Function from field "close" from driver structure with callbacks "dgnc_tty_ops" */
3544 ldv_handler_precall();
3545 dgnc_tty_close( var_group1, var_group2);
3546 ldv_s_dgnc_tty_ops_tty_operations=0;
3547
3548 }
3549
3550 }
3551
3552 break;
3553 case 2: {
3554
3555 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3556
3557
3558 /* content: static int dgnc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)*/
3559 /* LDV_COMMENT_BEGIN_PREP */
3560 #define init_MUTEX(sem) sema_init(sem, 1)
3561 #define DECLARE_MUTEX(name) \
3562 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3563 #define TMPBUFLEN (1024)
3564 #ifdef TTY_DONT_FLIP
3565 #endif
3566 /* LDV_COMMENT_END_PREP */
3567 /* LDV_COMMENT_FUNCTION_CALL Function from field "write" from driver structure with callbacks "dgnc_tty_ops" */
3568 ldv_handler_precall();
3569 dgnc_tty_write( var_group1, var_dgnc_tty_write_20_p1, var_dgnc_tty_write_20_p2);
3570
3571
3572
3573
3574 }
3575
3576 break;
3577 case 3: {
3578
3579 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3580
3581
3582 /* content: static int dgnc_tty_write_room(struct tty_struct *tty)*/
3583 /* LDV_COMMENT_BEGIN_PREP */
3584 #define init_MUTEX(sem) sema_init(sem, 1)
3585 #define DECLARE_MUTEX(name) \
3586 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3587 #define TMPBUFLEN (1024)
3588 #ifdef TTY_DONT_FLIP
3589 #endif
3590 /* LDV_COMMENT_END_PREP */
3591 /* LDV_COMMENT_FUNCTION_CALL Function from field "write_room" from driver structure with callbacks "dgnc_tty_ops" */
3592 ldv_handler_precall();
3593 dgnc_tty_write_room( var_group1);
3594
3595
3596
3597
3598 }
3599
3600 break;
3601 case 4: {
3602
3603 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3604
3605
3606 /* content: static void dgnc_tty_flush_buffer(struct tty_struct *tty)*/
3607 /* LDV_COMMENT_BEGIN_PREP */
3608 #define init_MUTEX(sem) sema_init(sem, 1)
3609 #define DECLARE_MUTEX(name) \
3610 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3611 #define TMPBUFLEN (1024)
3612 #ifdef TTY_DONT_FLIP
3613 #endif
3614 /* LDV_COMMENT_END_PREP */
3615 /* LDV_COMMENT_FUNCTION_CALL Function from field "flush_buffer" from driver structure with callbacks "dgnc_tty_ops" */
3616 ldv_handler_precall();
3617 dgnc_tty_flush_buffer( var_group1);
3618
3619
3620
3621
3622 }
3623
3624 break;
3625 case 5: {
3626
3627 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3628
3629
3630 /* content: static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)*/
3631 /* LDV_COMMENT_BEGIN_PREP */
3632 #define init_MUTEX(sem) sema_init(sem, 1)
3633 #define DECLARE_MUTEX(name) \
3634 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3635 #define TMPBUFLEN (1024)
3636 #ifdef TTY_DONT_FLIP
3637 #endif
3638 /* LDV_COMMENT_END_PREP */
3639 /* LDV_COMMENT_FUNCTION_CALL Function from field "chars_in_buffer" from driver structure with callbacks "dgnc_tty_ops" */
3640 ldv_handler_precall();
3641 dgnc_tty_chars_in_buffer( var_group1);
3642
3643
3644
3645
3646 }
3647
3648 break;
3649 case 6: {
3650
3651 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3652
3653
3654 /* content: static void dgnc_tty_flush_chars(struct tty_struct *tty)*/
3655 /* LDV_COMMENT_BEGIN_PREP */
3656 #define init_MUTEX(sem) sema_init(sem, 1)
3657 #define DECLARE_MUTEX(name) \
3658 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3659 #define TMPBUFLEN (1024)
3660 #ifdef TTY_DONT_FLIP
3661 #endif
3662 /* LDV_COMMENT_END_PREP */
3663 /* LDV_COMMENT_FUNCTION_CALL Function from field "flush_chars" from driver structure with callbacks "dgnc_tty_ops" */
3664 ldv_handler_precall();
3665 dgnc_tty_flush_chars( var_group1);
3666
3667
3668
3669
3670 }
3671
3672 break;
3673 case 7: {
3674
3675 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3676
3677
3678 /* content: static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)*/
3679 /* LDV_COMMENT_BEGIN_PREP */
3680 #define init_MUTEX(sem) sema_init(sem, 1)
3681 #define DECLARE_MUTEX(name) \
3682 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3683 #define TMPBUFLEN (1024)
3684 #ifdef TTY_DONT_FLIP
3685 #endif
3686 /* LDV_COMMENT_END_PREP */
3687 /* LDV_COMMENT_FUNCTION_CALL Function from field "ioctl" from driver structure with callbacks "dgnc_tty_ops" */
3688 ldv_handler_precall();
3689 dgnc_tty_ioctl( var_group1, var_dgnc_tty_ioctl_38_p1, var_dgnc_tty_ioctl_38_p2);
3690
3691
3692
3693
3694 }
3695
3696 break;
3697 case 8: {
3698
3699 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3700
3701
3702 /* content: static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)*/
3703 /* LDV_COMMENT_BEGIN_PREP */
3704 #define init_MUTEX(sem) sema_init(sem, 1)
3705 #define DECLARE_MUTEX(name) \
3706 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3707 #define TMPBUFLEN (1024)
3708 #ifdef TTY_DONT_FLIP
3709 #endif
3710 /* LDV_COMMENT_END_PREP */
3711 /* LDV_COMMENT_FUNCTION_CALL Function from field "set_termios" from driver structure with callbacks "dgnc_tty_ops" */
3712 ldv_handler_precall();
3713 dgnc_tty_set_termios( var_group1, var_group3);
3714
3715
3716
3717
3718 }
3719
3720 break;
3721 case 9: {
3722
3723 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3724
3725
3726 /* content: static void dgnc_tty_stop(struct tty_struct *tty)*/
3727 /* LDV_COMMENT_BEGIN_PREP */
3728 #define init_MUTEX(sem) sema_init(sem, 1)
3729 #define DECLARE_MUTEX(name) \
3730 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3731 #define TMPBUFLEN (1024)
3732 #ifdef TTY_DONT_FLIP
3733 #endif
3734 /* LDV_COMMENT_END_PREP */
3735 /* LDV_COMMENT_FUNCTION_CALL Function from field "stop" from driver structure with callbacks "dgnc_tty_ops" */
3736 ldv_handler_precall();
3737 dgnc_tty_stop( var_group1);
3738
3739
3740
3741
3742 }
3743
3744 break;
3745 case 10: {
3746
3747 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3748
3749
3750 /* content: static void dgnc_tty_start(struct tty_struct *tty)*/
3751 /* LDV_COMMENT_BEGIN_PREP */
3752 #define init_MUTEX(sem) sema_init(sem, 1)
3753 #define DECLARE_MUTEX(name) \
3754 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3755 #define TMPBUFLEN (1024)
3756 #ifdef TTY_DONT_FLIP
3757 #endif
3758 /* LDV_COMMENT_END_PREP */
3759 /* LDV_COMMENT_FUNCTION_CALL Function from field "start" from driver structure with callbacks "dgnc_tty_ops" */
3760 ldv_handler_precall();
3761 dgnc_tty_start( var_group1);
3762
3763
3764
3765
3766 }
3767
3768 break;
3769 case 11: {
3770
3771 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3772
3773
3774 /* content: static void dgnc_tty_throttle(struct tty_struct *tty)*/
3775 /* LDV_COMMENT_BEGIN_PREP */
3776 #define init_MUTEX(sem) sema_init(sem, 1)
3777 #define DECLARE_MUTEX(name) \
3778 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3779 #define TMPBUFLEN (1024)
3780 #ifdef TTY_DONT_FLIP
3781 #endif
3782 /* LDV_COMMENT_END_PREP */
3783 /* LDV_COMMENT_FUNCTION_CALL Function from field "throttle" from driver structure with callbacks "dgnc_tty_ops" */
3784 ldv_handler_precall();
3785 dgnc_tty_throttle( var_group1);
3786
3787
3788
3789
3790 }
3791
3792 break;
3793 case 12: {
3794
3795 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3796
3797
3798 /* content: static void dgnc_tty_unthrottle(struct tty_struct *tty)*/
3799 /* LDV_COMMENT_BEGIN_PREP */
3800 #define init_MUTEX(sem) sema_init(sem, 1)
3801 #define DECLARE_MUTEX(name) \
3802 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3803 #define TMPBUFLEN (1024)
3804 #ifdef TTY_DONT_FLIP
3805 #endif
3806 /* LDV_COMMENT_END_PREP */
3807 /* LDV_COMMENT_FUNCTION_CALL Function from field "unthrottle" from driver structure with callbacks "dgnc_tty_ops" */
3808 ldv_handler_precall();
3809 dgnc_tty_unthrottle( var_group1);
3810
3811
3812
3813
3814 }
3815
3816 break;
3817 case 13: {
3818
3819 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3820
3821
3822 /* content: static void dgnc_tty_hangup(struct tty_struct *tty)*/
3823 /* LDV_COMMENT_BEGIN_PREP */
3824 #define init_MUTEX(sem) sema_init(sem, 1)
3825 #define DECLARE_MUTEX(name) \
3826 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3827 #define TMPBUFLEN (1024)
3828 #ifdef TTY_DONT_FLIP
3829 #endif
3830 /* LDV_COMMENT_END_PREP */
3831 /* LDV_COMMENT_FUNCTION_CALL Function from field "hangup" from driver structure with callbacks "dgnc_tty_ops" */
3832 ldv_handler_precall();
3833 dgnc_tty_hangup( var_group1);
3834
3835
3836
3837
3838 }
3839
3840 break;
3841 case 14: {
3842
3843 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3844
3845
3846 /* content: static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c)*/
3847 /* LDV_COMMENT_BEGIN_PREP */
3848 #define init_MUTEX(sem) sema_init(sem, 1)
3849 #define DECLARE_MUTEX(name) \
3850 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3851 #define TMPBUFLEN (1024)
3852 #ifdef TTY_DONT_FLIP
3853 #endif
3854 /* LDV_COMMENT_END_PREP */
3855 /* LDV_COMMENT_FUNCTION_CALL Function from field "put_char" from driver structure with callbacks "dgnc_tty_ops" */
3856 ldv_handler_precall();
3857 dgnc_tty_put_char( var_group1, var_dgnc_tty_put_char_19_p1);
3858
3859
3860
3861
3862 }
3863
3864 break;
3865 case 15: {
3866
3867 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3868
3869
3870 /* content: static int dgnc_tty_tiocmget(struct tty_struct *tty)*/
3871 /* LDV_COMMENT_BEGIN_PREP */
3872 #define init_MUTEX(sem) sema_init(sem, 1)
3873 #define DECLARE_MUTEX(name) \
3874 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3875 #define TMPBUFLEN (1024)
3876 #ifdef TTY_DONT_FLIP
3877 #endif
3878 /* LDV_COMMENT_END_PREP */
3879 /* LDV_COMMENT_FUNCTION_CALL Function from field "tiocmget" from driver structure with callbacks "dgnc_tty_ops" */
3880 ldv_handler_precall();
3881 dgnc_tty_tiocmget( var_group1);
3882
3883
3884
3885
3886 }
3887
3888 break;
3889 case 16: {
3890
3891 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3892
3893
3894 /* content: static int dgnc_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)*/
3895 /* LDV_COMMENT_BEGIN_PREP */
3896 #define init_MUTEX(sem) sema_init(sem, 1)
3897 #define DECLARE_MUTEX(name) \
3898 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3899 #define TMPBUFLEN (1024)
3900 #ifdef TTY_DONT_FLIP
3901 #endif
3902 /* LDV_COMMENT_END_PREP */
3903 /* LDV_COMMENT_FUNCTION_CALL Function from field "tiocmset" from driver structure with callbacks "dgnc_tty_ops" */
3904 ldv_handler_precall();
3905 dgnc_tty_tiocmset( var_group1, var_dgnc_tty_tiocmset_22_p1, var_dgnc_tty_tiocmset_22_p2);
3906
3907
3908
3909
3910 }
3911
3912 break;
3913 case 17: {
3914
3915 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3916
3917
3918 /* content: static int dgnc_tty_send_break(struct tty_struct *tty, int msec)*/
3919 /* LDV_COMMENT_BEGIN_PREP */
3920 #define init_MUTEX(sem) sema_init(sem, 1)
3921 #define DECLARE_MUTEX(name) \
3922 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3923 #define TMPBUFLEN (1024)
3924 #ifdef TTY_DONT_FLIP
3925 #endif
3926 /* LDV_COMMENT_END_PREP */
3927 /* LDV_COMMENT_FUNCTION_CALL Function from field "break_ctl" from driver structure with callbacks "dgnc_tty_ops" */
3928 ldv_handler_precall();
3929 dgnc_tty_send_break( var_group1, var_dgnc_tty_send_break_23_p1);
3930
3931
3932
3933
3934 }
3935
3936 break;
3937 case 18: {
3938
3939 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3940
3941
3942 /* content: static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout)*/
3943 /* LDV_COMMENT_BEGIN_PREP */
3944 #define init_MUTEX(sem) sema_init(sem, 1)
3945 #define DECLARE_MUTEX(name) \
3946 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3947 #define TMPBUFLEN (1024)
3948 #ifdef TTY_DONT_FLIP
3949 #endif
3950 /* LDV_COMMENT_END_PREP */
3951 /* LDV_COMMENT_FUNCTION_CALL Function from field "wait_until_sent" from driver structure with callbacks "dgnc_tty_ops" */
3952 ldv_handler_precall();
3953 dgnc_tty_wait_until_sent( var_group1, var_dgnc_tty_wait_until_sent_24_p1);
3954
3955
3956
3957
3958 }
3959
3960 break;
3961 case 19: {
3962
3963 /** STRUCT: struct type: tty_operations, struct name: dgnc_tty_ops **/
3964
3965
3966 /* content: static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)*/
3967 /* LDV_COMMENT_BEGIN_PREP */
3968 #define init_MUTEX(sem) sema_init(sem, 1)
3969 #define DECLARE_MUTEX(name) \
3970 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
3971 #define TMPBUFLEN (1024)
3972 #ifdef TTY_DONT_FLIP
3973 #endif
3974 /* LDV_COMMENT_END_PREP */
3975 /* LDV_COMMENT_FUNCTION_CALL Function from field "send_xchar" from driver structure with callbacks "dgnc_tty_ops" */
3976 ldv_handler_precall();
3977 dgnc_tty_send_xchar( var_group1, var_dgnc_tty_send_xchar_25_p1);
3978
3979
3980
3981
3982 }
3983
3984 break;
3985 default: break;
3986
3987 }
3988
3989 }
3990
3991 ldv_module_exit:
3992
3993 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */
3994 ldv_final: ldv_check_final_state();
3995
3996 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */
3997 return;
3998
3999 }
4000 #endif
4001
4002 /* LDV_COMMENT_END_MAIN */ 1
2
3 #include <verifier/rcv.h>
4 #include <kernel-model/ERR.inc>
5
6 // State machine for usb gadget class registration (driven by ldv_create_class/ldv_register_class/ldv_unregister_class below).
7 enum
8 {
9 LDV_USB_GADGET_CLASS_ZERO_STATE, // Usb gadget class is not registered.
10 LDV_USB_GADGET_CLASS_REGISTERED // Usb gadget class is registered.
11 };
12
13 // State machine for char device region registration (driven by ldv_register_chrdev_region/ldv_unregister_chrdev_region below).
14 enum
15 {
16 LDV_USB_GADGET_CHRDEV_ZERO_STATE, // Char device region is not registered for usb gadget.
17 LDV_USB_GADGET_CHRDEV_REGISTERED // Char device region is registered for usb gadget.
18 };
19
20 // State machine for usb gadget registration itself (driven by ldv_register_usb_gadget/ldv_unregister_usb_gadget below).
21 enum
22 {
23 LDV_USB_GADGET_ZERO_STATE, // Usb gadget is not registered.
24 LDV_USB_GADGET_REGISTERED // Usb gadget is registered.
25 };
26
27 /* LDV_COMMENT_CHANGE_STATE Usb gadget class is not registered at the beginning. */
28 int ldv_usb_gadget_class = LDV_USB_GADGET_CLASS_ZERO_STATE;
29
30 /* LDV_COMMENT_CHANGE_STATE Char device region is not registered at the beginning. */
31 int ldv_usb_gadget_chrdev = LDV_USB_GADGET_CHRDEV_ZERO_STATE;
32
33 /* LDV_COMMENT_CHANGE_STATE Usb gadget is not registered at the beginning. */
34 int ldv_usb_gadget = LDV_USB_GADGET_ZERO_STATE;
35
36
37 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_create_class') Check that usb gadget class was not registered. Then create and register class for it. Returns the (possibly erroneous) class pointer. */
38 void *ldv_create_class(void)
39 {
40 void *is_got;
41
42 /* LDV_COMMENT_OTHER Obtain the would-be class pointer in the nondeterministic way. */
43 is_got = ldv_undef_ptr();
44
45 /* LDV_COMMENT_ASSERT Update the model state only when creation succeeded, i.e. the pointer is not an error pointer (values above LDV_PTR_MAX presumably model ERR_PTR — see kernel-model/ERR.inc). */
46 if (is_got <= LDV_PTR_MAX)
47 {
48 /* LDV_COMMENT_ASSERT Registering usb gadget class is only allowed if usb gadget is not registered. */
49 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
50 /* LDV_COMMENT_ASSERT Check that usb gadget class is unregistered (no double registration). */
51 ldv_assert(ldv_usb_gadget_class == LDV_USB_GADGET_CLASS_ZERO_STATE);
52 /* LDV_COMMENT_CHANGE_STATE Register class for usb gadget. */
53 ldv_usb_gadget_class = LDV_USB_GADGET_CLASS_REGISTERED;
54 }
55
56 /* LDV_COMMENT_RETURN Return the obtained class pointer (may be an error pointer). */
57 return is_got;
58 }
59
60 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_register_class') Check that usb gadget class was not registered and register class for it. Returns 0 on success, negative on failure. */
61 int ldv_register_class(void)
62 {
63 int is_reg;
64
65 /* LDV_COMMENT_OTHER Pick a nondeterministic nonpositive registration result (0 = success, negative = error). */
66 is_reg = ldv_undef_int_nonpositive();
67
68 /* LDV_COMMENT_ASSERT Update the model state only when registration succeeded (zero result). */
69 if (!is_reg)
70 {
71 /* LDV_COMMENT_ASSERT Registering usb gadget class is only allowed if usb gadget is not registered. */
72 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
73 /* LDV_COMMENT_ASSERT Check that usb gadget class is unregistered (no double registration). */
74 ldv_assert(ldv_usb_gadget_class == LDV_USB_GADGET_CLASS_ZERO_STATE);
75 /* LDV_COMMENT_CHANGE_STATE Register class for usb gadget. */
76 ldv_usb_gadget_class = LDV_USB_GADGET_CLASS_REGISTERED;
77 }
78
79 /* LDV_COMMENT_RETURN Return registration status (0 is success). */
80 return is_reg;
81 }
82
83 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_unregister_class') Check that usb gadget class was registered and unregister it. */
84 void ldv_unregister_class(void)
85 {
86 /* LDV_COMMENT_ASSERT Unregistering usb gadget class is only allowed if usb gadget is not registered (the gadget must be torn down before its class). */
87 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
88 /* LDV_COMMENT_ASSERT Check that usb gadget class is registered (no unregistration without a prior registration). */
89 ldv_assert(ldv_usb_gadget_class == LDV_USB_GADGET_CLASS_REGISTERED);
90 /* LDV_COMMENT_CHANGE_STATE Unregister class for usb gadget. */
91 ldv_usb_gadget_class = LDV_USB_GADGET_CLASS_ZERO_STATE;
92 }
93
94 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_register_chrdev_region') Check that chrdev region was not registered and register it. Returns 0 on success, negative on failure. */
95 int ldv_register_chrdev_region(void)
96 {
97 int is_reg;
98
99 /* LDV_COMMENT_OTHER Pick a nondeterministic nonpositive registration result (0 = success, negative = error). */
100 is_reg = ldv_undef_int_nonpositive();
101
102 /* LDV_COMMENT_ASSERT Update the model state only when registration succeeded (zero result). */
103 if (!is_reg)
104 {
105 /* LDV_COMMENT_ASSERT Usb gadget should be unregistered at this point. */
106 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
107 /* LDV_COMMENT_ASSERT Check that chrdev region is unregistered (no double registration). */
108 ldv_assert(ldv_usb_gadget_chrdev == LDV_USB_GADGET_CHRDEV_ZERO_STATE);
109 /* LDV_COMMENT_CHANGE_STATE Register chrdev region for usb gadget. */
110 ldv_usb_gadget_chrdev = LDV_USB_GADGET_CHRDEV_REGISTERED;
111 }
112
113 /* LDV_COMMENT_RETURN Return registration status (0 is success). */
114 return is_reg;
115 }
116
117 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_unregister_chrdev_region') Check that chrdev region was registered and unregister it. */
118 void ldv_unregister_chrdev_region(void)
119 {
120 /* LDV_COMMENT_ASSERT Usb gadget should not be registered at this point (it must be torn down before its chrdev region). */
121 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
122 /* LDV_COMMENT_ASSERT Check that chrdev region is registered (no unregistration without a prior registration). */
123 ldv_assert(ldv_usb_gadget_chrdev == LDV_USB_GADGET_CHRDEV_REGISTERED);
124 /* LDV_COMMENT_CHANGE_STATE Unregister chrdev. */
125 ldv_usb_gadget_chrdev = LDV_USB_GADGET_CHRDEV_ZERO_STATE;
126 }
127
128 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_register_usb_gadget') Check that usb gadget was not registered and register it. Returns 0 on success, negative on failure. */
129 int ldv_register_usb_gadget(void)
130 {
131 int is_reg;
132
133 /* LDV_COMMENT_OTHER Pick a nondeterministic nonpositive registration result (0 = success, negative = error). */
134 is_reg = ldv_undef_int_nonpositive();
135
136 /* LDV_COMMENT_ASSERT Update the model state only when registration succeeded (zero result). */
137 if (!is_reg)
138 {
139 /* LDV_COMMENT_ASSERT Gadget should not be registered at this point (no double registration). */
140 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
141 /* LDV_COMMENT_CHANGE_STATE Register usb gadget. */
142 ldv_usb_gadget = LDV_USB_GADGET_REGISTERED;
143 }
144
145 /* LDV_COMMENT_RETURN Return registration status (0 is success). */
146 return is_reg;
147 }
148
149 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_unregister_usb_gadget') Check that usb gadget was registered and unregister it. */
150 void ldv_unregister_usb_gadget(void)
151 {
152 /* LDV_COMMENT_ASSERT Usb gadget should be registered at this point (no unregistration without a prior registration). */
153 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_REGISTERED);
154 /* LDV_COMMENT_CHANGE_STATE Unregister usb gadget. */
155 ldv_usb_gadget = LDV_USB_GADGET_ZERO_STATE;
156 }
157
158 /* LDV_COMMENT_MODEL_FUNCTION_DEFINITION(name='ldv_check_final_state') Check that usb gadget is fully unregistered at the end, i.e. before the driver is unloaded; any state left REGISTERED is reported as a resource leak. */
159 void ldv_check_final_state(void)
160 {
161 /* LDV_COMMENT_ASSERT Usb gadget class should be unregistered at the end. */
162 ldv_assert(ldv_usb_gadget_class == LDV_USB_GADGET_CLASS_ZERO_STATE);
163 /* LDV_COMMENT_ASSERT Chrdev region should be unregistered at the end. */
164 ldv_assert(ldv_usb_gadget_chrdev == LDV_USB_GADGET_CHRDEV_ZERO_STATE);
165 /* LDV_COMMENT_ASSERT Usb gadget should be unregistered at the end. */
166 ldv_assert(ldv_usb_gadget == LDV_USB_GADGET_ZERO_STATE);
167 } 1 #ifndef _LINUX_FS_H
2 #define _LINUX_FS_H
3
4
5 #include <linux/linkage.h>
6 #include <linux/wait.h>
7 #include <linux/kdev_t.h>
8 #include <linux/dcache.h>
9 #include <linux/path.h>
10 #include <linux/stat.h>
11 #include <linux/cache.h>
12 #include <linux/list.h>
13 #include <linux/list_lru.h>
14 #include <linux/llist.h>
15 #include <linux/radix-tree.h>
16 #include <linux/rbtree.h>
17 #include <linux/init.h>
18 #include <linux/pid.h>
19 #include <linux/bug.h>
20 #include <linux/mutex.h>
21 #include <linux/capability.h>
22 #include <linux/semaphore.h>
23 #include <linux/fiemap.h>
24 #include <linux/rculist_bl.h>
25 #include <linux/atomic.h>
26 #include <linux/shrinker.h>
27 #include <linux/migrate_mode.h>
28 #include <linux/uidgid.h>
29 #include <linux/lockdep.h>
30 #include <linux/percpu-rwsem.h>
31 #include <linux/blk_types.h>
32
33 #include <asm/byteorder.h>
34 #include <uapi/linux/fs.h>
35
36 struct export_operations;
37 struct hd_geometry;
38 struct iovec;
39 struct nameidata;
40 struct kiocb;
41 struct kobject;
42 struct pipe_inode_info;
43 struct poll_table_struct;
44 struct kstatfs;
45 struct vm_area_struct;
46 struct vfsmount;
47 struct cred;
48 struct swap_info_struct;
49 struct seq_file;
50 struct workqueue_struct;
51 struct iov_iter;
52
53 extern void __init inode_init(void);
54 extern void __init inode_init_early(void);
55 extern void __init files_init(unsigned long);
56
57 extern struct files_stat_struct files_stat;
58 extern unsigned long get_max_files(void);
59 extern int sysctl_nr_open;
60 extern struct inodes_stat_t inodes_stat;
61 extern int leases_enable, lease_break_time;
62 extern int sysctl_protected_symlinks;
63 extern int sysctl_protected_hardlinks;
64
65 struct buffer_head;
66 typedef int (get_block_t)(struct inode *inode, sector_t iblock,
67 struct buffer_head *bh_result, int create);
68 typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
69 ssize_t bytes, void *private);
70
71 #define MAY_EXEC 0x00000001
72 #define MAY_WRITE 0x00000002
73 #define MAY_READ 0x00000004
74 #define MAY_APPEND 0x00000008
75 #define MAY_ACCESS 0x00000010
76 #define MAY_OPEN 0x00000020
77 #define MAY_CHDIR 0x00000040
78 /* called from RCU mode, don't block */
79 #define MAY_NOT_BLOCK 0x00000080
80
81 /*
82 * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond
83 * to O_WRONLY and O_RDWR via the strange trick in __dentry_open()
84 */
85
86 /* file is open for reading */
87 #define FMODE_READ ((__force fmode_t)0x1)
88 /* file is open for writing */
89 #define FMODE_WRITE ((__force fmode_t)0x2)
90 /* file is seekable */
91 #define FMODE_LSEEK ((__force fmode_t)0x4)
92 /* file can be accessed using pread */
93 #define FMODE_PREAD ((__force fmode_t)0x8)
94 /* file can be accessed using pwrite */
95 #define FMODE_PWRITE ((__force fmode_t)0x10)
96 /* File is opened for execution with sys_execve / sys_uselib */
97 #define FMODE_EXEC ((__force fmode_t)0x20)
98 /* File is opened with O_NDELAY (only set for block devices) */
99 #define FMODE_NDELAY ((__force fmode_t)0x40)
100 /* File is opened with O_EXCL (only set for block devices) */
101 #define FMODE_EXCL ((__force fmode_t)0x80)
102 /* File is opened using open(.., 3, ..) and is writeable only for ioctls
103 (specialy hack for floppy.c) */
104 #define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
105 /* 32bit hashes as llseek() offset (for directories) */
106 #define FMODE_32BITHASH ((__force fmode_t)0x200)
107 /* 64bit hashes as llseek() offset (for directories) */
108 #define FMODE_64BITHASH ((__force fmode_t)0x400)
109
110 /*
111 * Don't update ctime and mtime.
112 *
113 * Currently a special hack for the XFS open_by_handle ioctl, but we'll
114 * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
115 */
116 #define FMODE_NOCMTIME ((__force fmode_t)0x800)
117
118 /* Expect random access pattern */
119 #define FMODE_RANDOM ((__force fmode_t)0x1000)
120
121 /* File is huge (eg. /dev/kmem): treat loff_t as unsigned */
122 #define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
123
124 /* File is opened with O_PATH; almost nothing can be done with it */
125 #define FMODE_PATH ((__force fmode_t)0x4000)
126
127 /* File needs atomic accesses to f_pos */
128 #define FMODE_ATOMIC_POS ((__force fmode_t)0x8000)
129 /* Write access to underlying fs */
130 #define FMODE_WRITER ((__force fmode_t)0x10000)
131 /* Has read method(s) */
132 #define FMODE_CAN_READ ((__force fmode_t)0x20000)
133 /* Has write method(s) */
134 #define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
135
136 /* File was opened by fanotify and shouldn't generate fanotify events */
137 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
138
139 /*
140 * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
141 * that indicates that they should check the contents of the iovec are
142 * valid, but not check the memory that the iovec elements
143 * points too.
144 */
145 #define CHECK_IOVEC_ONLY -1
146
147 /*
148 * The below are the various read and write types that we support. Some of
149 * them include behavioral modifiers that send information down to the
150 * block layer and IO scheduler. Terminology:
151 *
152 * The block layer uses device plugging to defer IO a little bit, in
153 * the hope that we will see more IO very shortly. This increases
154 * coalescing of adjacent IO and thus reduces the number of IOs we
155 * have to send to the device. It also allows for better queuing,
156 * if the IO isn't mergeable. If the caller is going to be waiting
157 * for the IO, then he must ensure that the device is unplugged so
158 * that the IO is dispatched to the driver.
159 *
160 * All IO is handled async in Linux. This is fine for background
161 * writes, but for reads or writes that someone waits for completion
162 * on, we want to notify the block layer and IO scheduler so that they
163 * know about it. That allows them to make better scheduling
164 * decisions. So when the below references 'sync' and 'async', it
165 * is referencing this priority hint.
166 *
167 * With that in mind, the available types are:
168 *
169 * READ A normal read operation. Device will be plugged.
170 * READ_SYNC A synchronous read. Device is not plugged, caller can
171 * immediately wait on this read without caring about
172 * unplugging.
173 * READA Used for read-ahead operations. Lower priority, and the
174 * block layer could (in theory) choose to ignore this
175 * request if it runs into resource problems.
176 * WRITE A normal async write. Device will be plugged.
177 * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down
178 * the hint that someone will be waiting on this IO
179 * shortly. The write equivalent of READ_SYNC.
180 * WRITE_ODIRECT Special case write for O_DIRECT only.
181 * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush.
182 * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on
183 * non-volatile media on completion.
184 * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded
185 * by a cache flush and data is guaranteed to be on
186 * non-volatile media on completion.
187 *
188 */
189 #define RW_MASK REQ_WRITE
190 #define RWA_MASK REQ_RAHEAD
191
192 #define READ 0
193 #define WRITE RW_MASK
194 #define READA RWA_MASK
195
196 #define READ_SYNC (READ | REQ_SYNC)
197 #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
198 #define WRITE_ODIRECT (WRITE | REQ_SYNC)
199 #define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
200 #define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
201 #define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
202
203 /*
204 * Attribute flags. These should be or-ed together to figure out what
205 * has been changed!
206 */
207 #define ATTR_MODE (1 << 0)
208 #define ATTR_UID (1 << 1)
209 #define ATTR_GID (1 << 2)
210 #define ATTR_SIZE (1 << 3)
211 #define ATTR_ATIME (1 << 4)
212 #define ATTR_MTIME (1 << 5)
213 #define ATTR_CTIME (1 << 6)
214 #define ATTR_ATIME_SET (1 << 7)
215 #define ATTR_MTIME_SET (1 << 8)
216 #define ATTR_FORCE (1 << 9) /* Not a change, but a change it */
217 #define ATTR_ATTR_FLAG (1 << 10)
218 #define ATTR_KILL_SUID (1 << 11)
219 #define ATTR_KILL_SGID (1 << 12)
220 #define ATTR_FILE (1 << 13)
221 #define ATTR_KILL_PRIV (1 << 14)
222 #define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
223 #define ATTR_TIMES_SET (1 << 16)
224
225 /*
226 * This is the Inode Attributes structure, used for notify_change(). It
227 * uses the above definitions as flags, to know which values have changed.
228 * Also, in this manner, a Filesystem can look at only the values it cares
229 * about. Basically, these are the attributes that the VFS layer can
230 * request to change from the FS layer.
231 *
232 * Derek Atkins <warlord@MIT.EDU> 94-10-20
233 */
234 struct iattr {
235 unsigned int ia_valid; /* bitmask of the ATTR_* flags above: which fields below carry a new value */
236 umode_t ia_mode;
237 kuid_t ia_uid;
238 kgid_t ia_gid;
239 loff_t ia_size;
240 struct timespec ia_atime;
241 struct timespec ia_mtime;
242 struct timespec ia_ctime;
243
244 /*
245 * Not an attribute, but an auxiliary info for filesystems wanting to
246 * implement an ftruncate() like method. NOTE: filesystem should
247 * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
248 */
249 struct file *ia_file;
250 };
251
252 /*
253 * Includes for diskquotas.
254 */
255 #include <linux/quota.h>
256
257 /**
258 * enum positive_aop_returns - aop return codes with specific semantics
259 *
260 * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
261 * completed, that the page is still locked, and
262 * should be considered active. The VM uses this hint
263 * to return the page to the active list -- it won't
264 * be a candidate for writeback again in the near
265 * future. Other callers must be careful to unlock
266 * the page if they get this return. Returned by
267 * writepage();
268 *
269 * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
270 * unlocked it and the page might have been truncated.
271 * The caller should back up to acquiring a new page and
272 * trying again. The aop will be taking reasonable
273 * precautions not to livelock. If the caller held a page
274 * reference, it should drop it before retrying. Returned
275 * by readpage().
276 *
277 * address_space_operation functions return these large constants to indicate
278 * special semantics to the caller. These are much larger than the bytes in a
279 * page to allow for functions that return the number of bytes operated on in a
280 * given page.
281 */
282
/*
 * Out-of-band return values for address_space operations; see the
 * kernel-doc comment above.  Deliberately larger than any byte count a
 * page-sized operation could return, so callers can tell them apart.
 */
enum positive_aop_returns {
	AOP_WRITEPAGE_ACTIVATE	= 0x80000,	/* page still locked, keep it active */
	AOP_TRUNCATED_PAGE	= 0x80001,	/* page was unlocked, maybe truncated; retry */
};
287
288 #define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */
289 #define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */
290 #define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct
291 * helper code (eg buffer layer)
292 * to clear GFP_FS from alloc */
293
294 /*
295 * oh the beauties of C type declarations.
296 */
297 struct page;
298 struct address_space;
299 struct writeback_control;
300
301 /*
302 * "descriptor" for what we're up to with a read.
303 * This allows us to use the same read code yet
304 * have multiple different users of the data that
305 * we read from a file.
306 *
307 * The simplest case just copies the data to user
308 * mode.
309 */
/*
 * "descriptor" for what we're up to with a read: tracks progress and the
 * destination so the same read code can serve multiple consumers.  The
 * simplest case just copies the data to user mode.
 */
typedef struct {
	size_t written;		/* bytes copied out so far */
	size_t count;		/* bytes remaining to read */
	union {
		char __user *buf;	/* userspace destination */
		void *data;		/* or an in-kernel consumer cookie */
	} arg;
	int error;		/* first error encountered, 0 if none */
} read_descriptor_t;
319
320 typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
321 unsigned long, unsigned long);
322
/*
 * Per-mapping page cache operations supplied by the filesystem.  Method
 * semantics are documented in Documentation/filesystems/vfs.txt; only
 * brief notes are kept here.
 */
struct address_space_operations {
	int (*writepage)(struct page *page, struct writeback_control *wbc);
	int (*readpage)(struct file *, struct page *);

	/* Write back some dirty pages from this mapping. */
	int (*writepages)(struct address_space *, struct writeback_control *);

	/* Set a page dirty.  Return true if this dirtied it */
	int (*set_page_dirty)(struct page *page);

	/* Readahead: populate the cache with nr_pages pages from the list. */
	int (*readpages)(struct file *filp, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages);

	/* Paired begin/end hooks around a buffered write to [pos, pos+len). */
	int (*write_begin)(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata);
	int (*write_end)(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata);

	/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
	sector_t (*bmap)(struct address_space *, sector_t);
	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
	int (*releasepage) (struct page *, gfp_t);
	void (*freepage)(struct page *);
	ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
	int (*get_xip_mem)(struct address_space *, pgoff_t, int,
						void **, unsigned long *);
	/*
	 * migrate the contents of a page to the specified target. If
	 * migrate_mode is MIGRATE_ASYNC, it must not block.
	 */
	int (*migratepage) (struct address_space *,
			struct page *, struct page *, enum migrate_mode);
	int (*launder_page) (struct page *);
	int (*is_partially_uptodate) (struct page *, unsigned long,
					unsigned long);
	void (*is_dirty_writeback) (struct page *, bool *, bool *);
	int (*error_remove_page)(struct address_space *, struct page *);

	/* swapfile support */
	int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
				sector_t *span);
	void (*swap_deactivate)(struct file *file);
};
368
369 extern const struct address_space_operations empty_aops;
370
371 /*
372 * pagecache_write_begin/pagecache_write_end must be used by general code
373 * to write into the pagecache.
374 */
375 int pagecache_write_begin(struct file *, struct address_space *mapping,
376 loff_t pos, unsigned len, unsigned flags,
377 struct page **pagep, void **fsdata);
378
379 int pagecache_write_end(struct file *, struct address_space *mapping,
380 loff_t pos, unsigned len, unsigned copied,
381 struct page *page, void *fsdata);
382
383 struct backing_dev_info;
/*
 * The page cache and memory-mapping state of one file (or block device).
 * Embedded in struct inode as i_data; inode->i_mapping normally points at
 * it.  The alignment attribute is explained in the comment that follows.
 */
struct address_space {
	struct inode		*host;		/* owner: inode, block_device */
	struct radix_tree_root	page_tree;	/* radix tree of all pages */
	spinlock_t		tree_lock;	/* and lock protecting it */
	atomic_t		i_mmap_writable;/* count VM_SHARED mappings */
	struct rb_root		i_mmap;		/* tree of private and shared mappings */
	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
	struct mutex		i_mmap_mutex;	/* protect tree, count, list */
	/* Protected by tree_lock together with the radix tree */
	unsigned long		nrpages;	/* number of total pages */
	unsigned long		nrshadows;	/* number of shadow entries */
	pgoff_t			writeback_index;/* writeback starts here */
	const struct address_space_operations *a_ops;	/* methods */
	unsigned long		flags;		/* error bits/gfp mask */
	struct backing_dev_info *backing_dev_info; /* device readahead, etc */
	spinlock_t		private_lock;	/* for use by the address_space */
	struct list_head	private_list;	/* ditto */
	void			*private_data;	/* ditto */
} __attribute__((aligned(sizeof(long))));
403 /*
404 * On most architectures that alignment is already the case; but
405 * must be enforced here for CRIS, to let the least significant bit
406 * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
407 */
408 struct request_queue;
409
/*
 * In-kernel representation of one block device (whole disk or partition).
 * Instances live in the bdev pseudo-filesystem and are looked up by bd_dev.
 */
struct block_device {
	dev_t			bd_dev;  /* not a kdev_t - it's a search key */
	int			bd_openers;	/* number of opens outstanding */
	struct inode *		bd_inode;	/* will die */
	struct super_block *	bd_super;
	struct mutex		bd_mutex;	/* open/close mutex */
	struct list_head	bd_inodes;
	void *			bd_claiming;	/* holder currently claiming the device */
	void *			bd_holder;	/* exclusive holder cookie */
	int			bd_holders;
	bool			bd_write_holder;
#ifdef CONFIG_SYSFS
	struct list_head	bd_holder_disks;
#endif
	struct block_device *	bd_contains;	/* whole-disk bdev for a partition */
	unsigned		bd_block_size;
	struct hd_struct *	bd_part;	/* partition info, if this is one */
	/* number of times partitions within this device have been opened. */
	unsigned		bd_part_count;
	int			bd_invalidated;	/* partition table needs rescan */
	struct gendisk *	bd_disk;
	struct request_queue *  bd_queue;
	struct list_head	bd_list;
	/*
	 * Private data.  You must have bd_claim'ed the block_device
	 * to use this.  NOTE:  bd_claim allows an owner to claim
	 * the same device multiple times, the owner must take special
	 * care to not mess up bd_private for that case.
	 */
	unsigned long		bd_private;

	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
};
446
447 /*
448 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
449 * radix trees
450 */
451 #define PAGECACHE_TAG_DIRTY 0
452 #define PAGECACHE_TAG_WRITEBACK 1
453 #define PAGECACHE_TAG_TOWRITE 2
454
455 int mapping_tagged(struct address_space *mapping, int tag);
456
457 /*
458 * Might pages of this file be mapped into userspace?
459 */
460 static inline int mapping_mapped(struct address_space *mapping)
461 {
462 return !RB_EMPTY_ROOT(&mapping->i_mmap) ||
463 !list_empty(&mapping->i_mmap_nonlinear);
464 }
465
466 /*
467 * Might pages of this file have been modified in userspace?
468 * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
469 * marks vma as VM_SHARED if it is shared, and the file was opened for
470 * writing i.e. vma may be mprotected writable even if now readonly.
471 *
472 * If i_mmap_writable is negative, no new writable mappings are allowed. You
473 * can only deny writable mappings, if none exists right now.
474 */
475 static inline int mapping_writably_mapped(struct address_space *mapping)
476 {
477 return atomic_read(&mapping->i_mmap_writable) > 0;
478 }
479
480 static inline int mapping_map_writable(struct address_space *mapping)
481 {
482 return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
483 0 : -EPERM;
484 }
485
/* Drop one writable shared mapping; pairs with mapping_map_writable(). */
static inline void mapping_unmap_writable(struct address_space *mapping)
{
	atomic_dec(&mapping->i_mmap_writable);
}
490
491 static inline int mapping_deny_writable(struct address_space *mapping)
492 {
493 return atomic_dec_unless_positive(&mapping->i_mmap_writable) ?
494 0 : -EBUSY;
495 }
496
/* Lift one denial; pairs with a successful mapping_deny_writable(). */
static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}
501
502 /*
503 * Use sequence counter to get consistent i_size on 32-bit processors.
504 */
505 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
506 #include <linux/seqlock.h>
507 #define __NEED_I_SIZE_ORDERED
508 #define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount)
509 #else
510 #define i_size_ordered_init(inode) do { } while (0)
511 #endif
512
513 struct posix_acl;
514 #define ACL_NOT_CACHED ((void *)(-1))
515
516 #define IOP_FASTPERM 0x0001
517 #define IOP_LOOKUP 0x0002
518 #define IOP_NOFOLLOW 0x0004
519
520 /*
521 * Keep mostly read-only and often accessed (especially for
522 * the RCU path lookup and 'stat' data) fields at the beginning
523 * of the 'struct inode'
524 */
/*
 * One in-core inode.  Per the comment above, the mostly read-only fields
 * used by RCU path lookup and stat sit at the front; field order is
 * deliberate and must not be changed casually.
 */
struct inode {
	/* RCU path lookup / permission-check data, read-mostly: */
	umode_t			i_mode;
	unsigned short		i_opflags;	/* IOP_* fast-path flags */
	kuid_t			i_uid;
	kgid_t			i_gid;
	unsigned int		i_flags;

#ifdef CONFIG_FS_POSIX_ACL
	struct posix_acl	*i_acl;
	struct posix_acl	*i_default_acl;
#endif

	const struct inode_operations	*i_op;
	struct super_block	*i_sb;
	struct address_space	*i_mapping;	/* usually &i_data below */

#ifdef CONFIG_SECURITY
	void			*i_security;
#endif

	/* Stat data, not accessed from path walking */
	unsigned long		i_ino;
	/*
	 * Filesystems may only read i_nlink directly.  They shall use the
	 * following functions for modification:
	 *
	 *    (set|clear|inc|drop)_nlink
	 *    inode_(inc|dec)_link_count
	 */
	union {
		const unsigned int i_nlink;
		unsigned int __i_nlink;
	};
	dev_t			i_rdev;
	loff_t			i_size;
	struct timespec		i_atime;
	struct timespec		i_mtime;
	struct timespec		i_ctime;
	spinlock_t		i_lock;	/* i_blocks, i_bytes, maybe i_size */
	unsigned short		i_bytes;
	unsigned int		i_blkbits;
	blkcnt_t		i_blocks;

#ifdef __NEED_I_SIZE_ORDERED
	seqcount_t		i_size_seqcount; /* for 32-bit SMP i_size_read/write */
#endif

	/* Misc */
	unsigned long		i_state;	/* I_DIRTY, I_FREEING, ... flags */
	struct mutex		i_mutex;

	unsigned long		dirtied_when;	/* jiffies of first dirtying */

	struct hlist_node	i_hash;		/* inode hash table linkage */
	struct list_head	i_wb_list;	/* backing dev IO list */
	struct list_head	i_lru;		/* inode LRU list */
	struct list_head	i_sb_list;	/* per-superblock inode list */
	union {
		struct hlist_head	i_dentry;	/* dentries for this inode */
		struct rcu_head		i_rcu;		/* reused for RCU-delayed free */
	};
	u64			i_version;
	atomic_t		i_count;	/* usage refcount */
	atomic_t		i_dio_count;	/* in-flight direct-IO ops */
	atomic_t		i_writecount;
#ifdef CONFIG_IMA
	atomic_t		i_readcount; /* struct files open RO */
#endif
	const struct file_operations	*i_fop;	/* former ->i_op->default_file_ops */
	struct file_lock	*i_flock;	/* head of lock list; see struct file_lock */
	struct address_space	i_data;		/* the inode's own page cache */
#ifdef CONFIG_QUOTA
	struct dquot		*i_dquot[MAXQUOTAS];
#endif
	struct list_head	i_devices;
	/* Only one of these applies, depending on i_mode: */
	union {
		struct pipe_inode_info	*i_pipe;
		struct block_device	*i_bdev;
		struct cdev		*i_cdev;
	};

	__u32			i_generation;

#ifdef CONFIG_FSNOTIFY
	__u32			i_fsnotify_mask; /* all events this inode cares about */
	struct hlist_head	i_fsnotify_marks;
#endif

	void			*i_private; /* fs or device private pointer */
};
615
/* Is the inode absent from the inode hash table (never hashed or removed)? */
static inline int inode_unhashed(struct inode *inode)
{
	return hlist_unhashed(&inode->i_hash);
}
620
621 /*
622 * inode->i_mutex nesting subclasses for the lock validator:
623 *
624 * 0: the object of the current VFS operation
625 * 1: parent
626 * 2: child/target
627 * 3: xattr
628 * 4: second non-directory
629 * The last is for certain operations (such as rename) which lock two
630 * non-directories at once.
631 *
632 * The locking order between these classes is
633 * parent -> child -> normal -> xattr -> second non-directory
634 */
/*
 * Lockdep subclasses for i_mutex; see the nesting rules in the comment
 * above.  Order here defines the annotation values, not the lock order.
 */
enum inode_i_mutex_lock_class
{
	I_MUTEX_NORMAL,		/* 0: the object of the current VFS operation */
	I_MUTEX_PARENT,		/* 1: parent directory */
	I_MUTEX_CHILD,		/* 2: child/target */
	I_MUTEX_XATTR,		/* 3: xattr */
	I_MUTEX_NONDIR2		/* 4: second non-directory (e.g. cross rename) */
};
643
644 void lock_two_nondirectories(struct inode *, struct inode*);
645 void unlock_two_nondirectories(struct inode *, struct inode*);
646
647 /*
648 * NOTE: in a 32bit arch with a preemptable kernel and
649 * an UP compile the i_size_read/write must be atomic
650 * with respect to the local cpu (unlike with preempt disabled),
651 * but they don't need to be atomic with respect to other cpus like in
652 * true SMP (so they need either to either locally disable irq around
653 * the read or for example on x86 they can be still implemented as a
654 * cmpxchg8b without the need of the lock prefix). For SMP compiles
655 * and 64bit archs it makes no difference if preempt is enabled or not.
656 */
/*
 * Read i_size atomically with respect to concurrent i_size_write(); see
 * the note above on why each configuration needs a different scheme.
 */
static inline loff_t i_size_read(const struct inode *inode)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	/* 32-bit SMP: loff_t loads tear, so retry under the seqcount. */
	loff_t i_size;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&inode->i_size_seqcount);
		i_size = inode->i_size;
	} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
	return i_size;
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
	/* 32-bit preemptible UP: just keep the two-word load unpreempted. */
	loff_t i_size;

	preempt_disable();
	i_size = inode->i_size;
	preempt_enable();
	return i_size;
#else
	/* 64-bit (or non-preempt UP): a plain load is sufficient. */
	return inode->i_size;
#endif
}
679
680 /*
681 * NOTE: unlike i_size_read(), i_size_write() does need locking around it
682 * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount
683 * can be lost, resulting in subsequent i_size_read() calls spinning forever.
684 */
/*
 * Update i_size; counterpart of i_size_read().  Caller must serialize
 * writers (normally via i_mutex) per the comment above, or the seqcount
 * can be corrupted and readers spin forever.
 */
static inline void i_size_write(struct inode *inode, loff_t i_size)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	/* preempt_disable keeps the write-side seqcount critical section short. */
	preempt_disable();
	write_seqcount_begin(&inode->i_size_seqcount);
	inode->i_size = i_size;
	write_seqcount_end(&inode->i_size_seqcount);
	preempt_enable();
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
	preempt_disable();
	inode->i_size = i_size;
	preempt_enable();
#else
	inode->i_size = i_size;
#endif
}
701
702 /* Helper functions so that in most cases filesystems will
703 * not need to deal directly with kuid_t and kgid_t and can
704 * instead deal with the raw numeric values that are stored
705 * in the filesystem.
706 */
/* Read the inode owner uid as a raw value in the initial user namespace. */
static inline uid_t i_uid_read(const struct inode *inode)
{
	return from_kuid(&init_user_ns, inode->i_uid);
}

/* Read the inode group gid as a raw value in the initial user namespace. */
static inline gid_t i_gid_read(const struct inode *inode)
{
	return from_kgid(&init_user_ns, inode->i_gid);
}

/* Store a raw uid, mapping it through the initial user namespace. */
static inline void i_uid_write(struct inode *inode, uid_t uid)
{
	inode->i_uid = make_kuid(&init_user_ns, uid);
}

/* Store a raw gid, mapping it through the initial user namespace. */
static inline void i_gid_write(struct inode *inode, gid_t gid)
{
	inode->i_gid = make_kgid(&init_user_ns, gid);
}
726
/* Minor number of the device this (device) inode refers to, from i_rdev. */
static inline unsigned iminor(const struct inode *inode)
{
	return MINOR(inode->i_rdev);
}

/* Major number of the device this (device) inode refers to, from i_rdev. */
static inline unsigned imajor(const struct inode *inode)
{
	return MAJOR(inode->i_rdev);
}
736
737 extern struct block_device *I_BDEV(struct inode *inode);
738
/* Ownership info for SIGIO/SIGURG delivery, embedded in struct file. */
struct fown_struct {
	rwlock_t lock;          /* protects pid, uid, euid fields */
	struct pid *pid;	/* pid or -pgrp where SIGIO should be sent */
	enum pid_type pid_type;	/* Kind of process group SIGIO should be sent to */
	kuid_t uid, euid;	/* uid/euid of process setting the owner */
	int signum;		/* posix.1b rt signal to be delivered on IO */
};
746
747 /*
748 * Track a single file's readahead state
749 */
/*
 * Track a single file's readahead state.  The current window is
 * [start, start + size); async_size marks its asynchronous tail.
 */
struct file_ra_state {
	pgoff_t start;			/* where readahead started */
	unsigned int size;		/* # of readahead pages */
	unsigned int async_size;	/* do asynchronous readahead when
					   there are only # of pages ahead */

	unsigned int ra_pages;		/* Maximum readahead window */
	unsigned int mmap_miss;		/* Cache miss stat for mmap accesses */
	loff_t prev_pos;		/* Cache last read() position */
};
760
761 /*
762 * Check if @index falls in the readahead windows.
763 */
764 static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
765 {
766 return (index >= ra->start &&
767 index < ra->start + ra->size);
768 }
769
/*
 * One open file description.  Created by open(), refcounted via f_count,
 * shared between file descriptors that dup() it.
 */
struct file {
	union {
		struct llist_node	fu_llist;	/* deferred fput list */
		struct rcu_head 	fu_rcuhead;	/* RCU-delayed free */
	} f_u;
	struct path		f_path;		/* vfsmount + dentry */
#define f_dentry	f_path.dentry
	struct inode		*f_inode;	/* cached value */
	const struct file_operations	*f_op;

	/*
	 * Protects f_ep_links, f_flags.
	 * Must not be taken from IRQ context.
	 */
	spinlock_t		f_lock;
	atomic_long_t		f_count;	/* reference count */
	unsigned int 		f_flags;	/* O_* open flags */
	fmode_t			f_mode;		/* FMODE_* access mode */
	struct mutex		f_pos_lock;	/* serializes f_pos updates */
	loff_t			f_pos;		/* current file offset */
	struct fown_struct	f_owner;	/* SIGIO ownership, see fown_struct */
	const struct cred	*f_cred;	/* opener's credentials */
	struct file_ra_state	f_ra;		/* readahead state */

	u64			f_version;
#ifdef CONFIG_SECURITY
	void			*f_security;
#endif
	/* needed for tty driver, and maybe others */
	void			*private_data;

#ifdef CONFIG_EPOLL
	/* Used by fs/eventpoll.c to link all the hooks to this file */
	struct list_head	f_ep_links;
	struct list_head	f_tfile_llink;
#endif /* #ifdef CONFIG_EPOLL */
	struct address_space	*f_mapping;	/* page cache mapping, usually inode's */
} __attribute__((aligned(4)));	/* lest something weird decides that 2 is OK */
808
/*
 * Userspace-visible file handle for name_to_handle_at()/open_by_handle_at().
 * f_handle is a variable-length trailer of handle_bytes bytes (old-style
 * zero-length array idiom for a flexible array member).
 */
struct file_handle {
	__u32 handle_bytes;	/* length of f_handle[] in bytes */
	int handle_type;	/* filesystem-specific handle type */
	/* file identifier */
	unsigned char f_handle[0];
};
815
/*
 * Take an extra reference on @f and return it.  Caller must already hold
 * a reference; drop with fput().
 */
static inline struct file *get_file(struct file *f)
{
	atomic_long_inc(&f->f_count);
	return f;
}
821 #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
822 #define file_count(x) atomic_long_read(&(x)->f_count)
823
824 #define MAX_NON_LFS ((1UL<<31) - 1)
825
826 /* Page cache limit. The filesystems should put that into their s_maxbytes
827 limits, otherwise bad things can happen in VM. */
828 #if BITS_PER_LONG==32
829 #define MAX_LFS_FILESIZE (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
830 #elif BITS_PER_LONG==64
831 #define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
832 #endif
833
834 #define FL_POSIX 1
835 #define FL_FLOCK 2
836 #define FL_DELEG 4 /* NFSv4 delegation */
837 #define FL_ACCESS 8 /* not trying to lock, just looking */
838 #define FL_EXISTS 16 /* when unlocking, test for existence */
839 #define FL_LEASE 32 /* lease held on this file */
840 #define FL_CLOSE 64 /* unlock on close */
841 #define FL_SLEEP 128 /* A blocking lock */
842 #define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
843 #define FL_UNLOCK_PENDING 512 /* Lease is being broken */
844 #define FL_OFDLCK 1024 /* lock is "owned" by struct file */
845
846 /*
847 * Special return value from posix_lock_file() and vfs_lock_file() for
848 * asynchronous locking.
849 */
850 #define FILE_LOCK_DEFERRED 1
851
852 /* legacy typedef, should eventually be removed */
853 typedef void *fl_owner_t;
854
/* Callbacks a filesystem can attach to a file_lock via fl_ops. */
struct file_lock_operations {
	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);	/* copy private state new <- old */
	void (*fl_release_private)(struct file_lock *);			/* free fl private state */
};
859
/*
 * Callbacks for lock managers (lockd/nfsd and lease code), attached to a
 * file_lock via fl_lmops.  Detailed contracts live in
 * Documentation/filesystems/locks.txt; notes here are brief.
 */
struct lock_manager_operations {
	int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
	unsigned long (*lm_owner_key)(struct file_lock *);
	void (*lm_get_owner)(struct file_lock *, struct file_lock *);
	void (*lm_put_owner)(struct file_lock *);
	void (*lm_notify)(struct file_lock *);	/* unblock callback */
	int (*lm_grant)(struct file_lock *, int);
	bool (*lm_break)(struct file_lock *);	/* called when a lease is being broken */
	int (*lm_change)(struct file_lock **, int, struct list_head *);
	void (*lm_setup)(struct file_lock *, void **);
};
871
/* A lock manager registered for grace-period tracking; see locks_start_grace(). */
struct lock_manager {
	struct list_head list;	/* entry in the per-net grace list */
};
875
876 struct net;
877 void locks_start_grace(struct net *, struct lock_manager *);
878 void locks_end_grace(struct lock_manager *);
879 int locks_in_grace(struct net *);
880
881 /* that will die - we need it for nfs_lock_info */
882 #include <linux/nfs_fs_i.h>
883
884 /*
885 * struct file_lock represents a generic "file lock". It's used to represent
886 * POSIX byte range locks, BSD (flock) locks, and leases. It's important to
887 * note that the same struct is used to represent both a request for a lock and
888 * the lock itself, but the same object is never used for both.
889 *
890 * FIXME: should we create a separate "struct lock_request" to help distinguish
891 * these two uses?
892 *
893 * The i_flock list is ordered by:
894 *
895 * 1) lock type -- FL_LEASEs first, then FL_FLOCK, and finally FL_POSIX
896 * 2) lock owner
897 * 3) lock range start
898 * 4) lock range end
899 *
900 * Obviously, the last two criteria only matter for POSIX locks.
901 */
/*
 * Generic file lock (POSIX byte-range lock, flock lock, or lease); see
 * the comment above for the i_flock list ordering rules.  The same struct
 * describes both a request and a held lock, never both at once.
 */
struct file_lock {
	struct file_lock *fl_next;	/* singly linked list for this inode  */
	struct hlist_node fl_link;	/* node in global lists */
	struct list_head fl_block;	/* circular list of blocked processes */
	fl_owner_t fl_owner;		/* owner cookie (distinguishes lock owners) */
	unsigned int fl_flags;		/* FL_* flags above */
	unsigned char fl_type;		/* F_RDLCK / F_WRLCK / F_UNLCK */
	unsigned int fl_pid;
	int fl_link_cpu;		/* what cpu's list is this on? */
	struct pid *fl_nspid;
	wait_queue_head_t fl_wait;	/* blocked waiters sleep here */
	struct file *fl_file;
	loff_t fl_start;		/* byte range [fl_start, fl_end] */
	loff_t fl_end;

	struct fasync_struct *	fl_fasync; /* for lease break notifications */
	/* for lease breaks: */
	unsigned long fl_break_time;
	unsigned long fl_downgrade_time;

	const struct file_lock_operations *fl_ops;	/* Callbacks for filesystems */
	const struct lock_manager_operations *fl_lmops;	/* Callbacks for lockmanagers */
	union {
		struct nfs_lock_info	nfs_fl;
		struct nfs4_lock_info	nfs4_fl;
		struct {
			struct list_head link;	/* link in AFS vnode's pending_locks list */
			int state;		/* state of grant or error if -ve */
		} afs;
	} fl_u;
};
933
934 /* The following constant reflects the upper bound of the file/locking space */
935 #ifndef OFFSET_MAX
936 #define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
937 #define OFFSET_MAX INT_LIMIT(loff_t)
938 #define OFFT_OFFSET_MAX INT_LIMIT(off_t)
939 #endif
940
941 #include <linux/fcntl.h>
942
943 extern void send_sigio(struct fown_struct *fown, int fd, int band);
944
945 #ifdef CONFIG_FILE_LOCKING
946 extern int fcntl_getlk(struct file *, unsigned int, struct flock __user *);
947 extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
948 struct flock __user *);
949
950 #if BITS_PER_LONG == 32
951 extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 __user *);
952 extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
953 struct flock64 __user *);
954 #endif
955
956 extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
957 extern int fcntl_getlease(struct file *filp);
958
959 /* fs/locks.c */
960 void locks_free_lock(struct file_lock *fl);
961 extern void locks_init_lock(struct file_lock *);
962 extern struct file_lock * locks_alloc_lock(void);
963 extern void locks_copy_lock(struct file_lock *, struct file_lock *);
964 extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
965 extern void locks_remove_posix(struct file *, fl_owner_t);
966 extern void locks_remove_file(struct file *);
967 extern void locks_release_private(struct file_lock *);
968 extern void posix_test_lock(struct file *, struct file_lock *);
969 extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
970 extern int posix_lock_file_wait(struct file *, struct file_lock *);
971 extern int posix_unblock_lock(struct file_lock *);
972 extern int vfs_test_lock(struct file *, struct file_lock *);
973 extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
974 extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
975 extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
976 extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
977 extern void lease_get_mtime(struct inode *, struct timespec *time);
978 extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
979 extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
980 extern int lease_modify(struct file_lock **, int, struct list_head *);
981 #else /* !CONFIG_FILE_LOCKING */
/*
 * !CONFIG_FILE_LOCKING stubs: keep callers compiling when file locking is
 * configured out.  Lock/lease requests fail (-ENOLCK, -EACCES, -EINVAL,
 * -ENOENT), queries report "no lock" (0 or F_UNLCK), and the void helpers
 * are no-ops.
 */
static inline int fcntl_getlk(struct file *file, unsigned int cmd,
			      struct flock __user *user)
{
	return -EINVAL;
}

static inline int fcntl_setlk(unsigned int fd, struct file *file,
			      unsigned int cmd, struct flock __user *user)
{
	return -EACCES;
}

#if BITS_PER_LONG == 32
static inline int fcntl_getlk64(struct file *file, unsigned int cmd,
				struct flock64 __user *user)
{
	return -EINVAL;
}

static inline int fcntl_setlk64(unsigned int fd, struct file *file,
				unsigned int cmd, struct flock64 __user *user)
{
	return -EACCES;
}
#endif
static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	return -EINVAL;
}

static inline int fcntl_getlease(struct file *filp)
{
	return F_UNLCK;
}

static inline void locks_init_lock(struct file_lock *fl)
{
	return;
}

static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	return;
}

static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	return;
}

static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	return;
}

static inline void locks_remove_file(struct file *filp)
{
	return;
}

static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
{
	return;
}

static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
				  struct file_lock *conflock)
{
	return -ENOLCK;
}

static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	return -ENOLCK;
}

static inline int posix_unblock_lock(struct file_lock *waiter)
{
	return -ENOENT;
}

static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	return 0;
}

static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
				struct file_lock *fl, struct file_lock *conf)
{
	return -ENOLCK;
}

static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	return 0;
}

static inline int flock_lock_file_wait(struct file *filp,
				       struct file_lock *request)
{
	return -ENOLCK;
}

static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
	return 0;
}

static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	return;
}

static inline int generic_setlease(struct file *filp, long arg,
				   struct file_lock **flp, void **priv)
{
	return -EINVAL;
}

static inline int vfs_setlease(struct file *filp, long arg,
			       struct file_lock **lease, void **priv)
{
	return -EINVAL;
}

static inline int lease_modify(struct file_lock **before, int arg,
			       struct list_head *dispose)
{
	return -EINVAL;
}
1112 #endif /* !CONFIG_FILE_LOCKING */
1113
1114
/* One entry on a file's FASYNC notification list; see kill_fasync(). */
struct fasync_struct {
	spinlock_t		fa_lock;
	int			magic;		/* FASYNC_MAGIC sanity marker */
	int			fa_fd;		/* fd to report in the signal */
	struct fasync_struct	*fa_next;	/* singly linked list */
	struct file		*fa_file;
	struct rcu_head		fa_rcu;		/* for RCU-delayed free */
};
1123
1124 #define FASYNC_MAGIC 0x4601
1125
1126 /* SMP safe fasync helpers: */
1127 extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
1128 extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
1129 extern int fasync_remove_entry(struct file *, struct fasync_struct **);
1130 extern struct fasync_struct *fasync_alloc(void);
1131 extern void fasync_free(struct fasync_struct *);
1132
1133 /* can be called from interrupts */
1134 extern void kill_fasync(struct fasync_struct **, int, int);
1135
1136 extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
1137 extern void f_setown(struct file *filp, unsigned long arg, int force);
1138 extern void f_delown(struct file *filp);
1139 extern pid_t f_getown(struct file *filp);
1140 extern int send_sigurg(struct fown_struct *fown);
1141
1142 struct mm_struct;
1143
1144 /*
1145 * Umount options
1146 */
1147
1148 #define MNT_FORCE 0x00000001 /* Attempt to forcibily umount */
1149 #define MNT_DETACH 0x00000002 /* Just detach from the tree */
1150 #define MNT_EXPIRE 0x00000004 /* Mark for expiry */
1151 #define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
1152 #define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
1153
1154 extern struct list_head super_blocks;
1155 extern spinlock_t sb_lock;
1156
1157 /* Possible states of 'frozen' field */
/*
 * Possible states of the sb_writers 'frozen' field.  The intermediate
 * values double as freeze levels for __sb_start_write()/__sb_end_write().
 */
enum {
	SB_UNFROZEN = 0,		/* FS is unfrozen */
	SB_FREEZE_WRITE	= 1,		/* Writes, dir ops, ioctls frozen */
	SB_FREEZE_PAGEFAULT = 2,	/* Page faults stopped as well */
	SB_FREEZE_FS = 3,		/* For internal FS use (e.g. to stop
					 * internal threads if needed) */
	SB_FREEZE_COMPLETE = 4,		/* ->freeze_fs finished successfully */
};
1166
1167 #define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
1168
/* Per-superblock write-freeze bookkeeping, embedded as sb->s_writers. */
struct sb_writers {
	/* Counters for counting writers at each level */
	struct percpu_counter	counter[SB_FREEZE_LEVELS];
	wait_queue_head_t	wait;		/* queue for waiting for
						   writers / faults to finish */
	int			frozen;		/* Is sb frozen? */
	wait_queue_head_t	wait_unfrozen;	/* queue for waiting for
						   sb to be thawed */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	lock_map[SB_FREEZE_LEVELS];
#endif
};
1181
/* One mounted filesystem instance. */
struct super_block {
	struct list_head	s_list;		/* Keep this first */
	dev_t			s_dev;		/* search index; _not_ kdev_t */
	unsigned char		s_blocksize_bits;
	unsigned long		s_blocksize;
	loff_t			s_maxbytes;	/* Max file size */
	struct file_system_type	*s_type;
	const struct super_operations	*s_op;
	const struct dquot_operations	*dq_op;
	const struct quotactl_ops	*s_qcop;
	const struct export_operations *s_export_op;
	unsigned long		s_flags;	/* MS_* mount flags */
	unsigned long		s_magic;	/* filesystem magic number */
	struct dentry		*s_root;	/* root dentry of this mount */
	struct rw_semaphore	s_umount;	/* protects mount/umount/remount */
	int			s_count;	/* passive reference count */
	atomic_t		s_active;	/* active reference count */
#ifdef CONFIG_SECURITY
	void                    *s_security;
#endif
	const struct xattr_handler **s_xattr;

	struct list_head	s_inodes;	/* all inodes */
	struct hlist_bl_head	s_anon;		/* anonymous dentries for (nfs) exporting */
	struct list_head	s_mounts;	/* list of mounts; _not_ for fs use */
	struct block_device	*s_bdev;
	struct backing_dev_info *s_bdi;
	struct mtd_info		*s_mtd;
	struct hlist_node	s_instances;	/* entry in s_type's fs_supers */
	struct quota_info	s_dquot;	/* Diskquota specific options */

	struct sb_writers	s_writers;	/* freeze/thaw state, see sb_writers */

	char s_id[32];				/* Informational name */
	u8 s_uuid[16];				/* UUID */

	void 			*s_fs_info;	/* Filesystem private info */
	unsigned int		s_max_links;
	fmode_t			s_mode;

	/* Granularity of c/m/atime in ns.
	   Cannot be worse than a second */
	u32		   s_time_gran;

	/*
	 * The next field is for VFS *only*. No filesystems have any business
	 * even looking at it. You had been warned.
	 */
	struct mutex s_vfs_rename_mutex;	/* Kludge */

	/*
	 * Filesystem subtype.  If non-empty the filesystem type field
	 * in /proc/mounts will be "type.subtype"
	 */
	char *s_subtype;

	/*
	 * Saved mount options for lazy filesystems using
	 * generic_show_options()
	 */
	char __rcu *s_options;
	const struct dentry_operations *s_d_op; /* default d_op for dentries */

	/*
	 * Saved pool identifier for cleancache (-1 means none)
	 */
	int cleancache_poolid;

	struct shrinker s_shrink;	/* per-sb shrinker handle */

	/* Number of inodes with nlink == 0 but still referenced */
	atomic_long_t s_remove_count;

	/* Being remounted read-only */
	int s_readonly_remount;

	/* AIO completions deferred from interrupt context */
	struct workqueue_struct *s_dio_done_wq;
	struct hlist_head s_pins;

	/*
	 * Keep the lru lists last in the structure so they always sit on their
	 * own individual cachelines.
	 */
	struct list_lru		s_dentry_lru ____cacheline_aligned_in_smp;
	struct list_lru		s_inode_lru ____cacheline_aligned_in_smp;
	struct rcu_head		rcu;		/* for RCU-delayed free of the sb */
};
1270
1271 extern struct timespec current_fs_time(struct super_block *sb);
1272
1273 /*
1274 * Snapshotting support.
1275 */
1276
1277 void __sb_end_write(struct super_block *sb, int level);
1278 int __sb_start_write(struct super_block *sb, int level, bool wait);
1279
1280 /**
1281 * sb_end_write - drop write access to a superblock
1282 * @sb: the super we wrote to
1283 *
1284 * Decrement number of writers to the filesystem. Wake up possible waiters
1285 * wanting to freeze the filesystem.
1286 */
1287 static inline void sb_end_write(struct super_block *sb)
1288 {
1289 __sb_end_write(sb, SB_FREEZE_WRITE); /* pairs with sb_start_write() */
1290 }
1291
1292 /**
1293 * sb_end_pagefault - drop write access to a superblock from a page fault
1294 * @sb: the super we wrote to
1295 *
1296 * Decrement number of processes handling write page fault to the filesystem.
1297 * Wake up possible waiters wanting to freeze the filesystem.
1298 */
1299 static inline void sb_end_pagefault(struct super_block *sb)
1300 {
1301 __sb_end_write(sb, SB_FREEZE_PAGEFAULT); /* pairs with sb_start_pagefault() */
1302 }
1303
1304 /**
1305 * sb_end_intwrite - drop write access to a superblock for internal fs purposes
1306 * @sb: the super we wrote to
1307 *
1308 * Decrement fs-internal number of writers to the filesystem. Wake up possible
1309 * waiters wanting to freeze the filesystem.
1310 */
1311 static inline void sb_end_intwrite(struct super_block *sb)
1312 {
1313 __sb_end_write(sb, SB_FREEZE_FS); /* pairs with sb_start_intwrite() */
1314 }
1315
1316 /**
1317 * sb_start_write - get write access to a superblock
1318 * @sb: the super we write to
1319 *
1320 * When a process wants to write data or metadata to a file system (i.e. dirty
1321 * a page or an inode), it should embed the operation in a sb_start_write() -
1322 * sb_end_write() pair to get exclusion against file system freezing. This
1323 * function increments number of writers preventing freezing. If the file
1324 * system is already frozen, the function waits until the file system is
1325 * thawed.
1326 *
1327 * Since freeze protection behaves as a lock, users have to preserve
1328 * ordering of freeze protection and other filesystem locks. Generally,
1329 * freeze protection should be the outermost lock. In particular, we have:
1330 *
1331 * sb_start_write
1332 * -> i_mutex (write path, truncate, directory ops, ...)
1333 * -> s_umount (freeze_super, thaw_super)
1334 */
1335 static inline void sb_start_write(struct super_block *sb)
1336 {
1337 __sb_start_write(sb, SB_FREEZE_WRITE, true); /* wait == true: may block until thawed */
1338 }
1339
/*
 * sb_start_write_trylock - non-blocking variant of sb_start_write().
 * Passes wait == false so the caller is never blocked on a frozen
 * filesystem; returns __sb_start_write()'s result (non-zero on success,
 * presumably - defined in fs/super.c, confirm there).
 */
1340 static inline int sb_start_write_trylock(struct super_block *sb)
1341 {
1342 return __sb_start_write(sb, SB_FREEZE_WRITE, false);
1343 }
1344
1345 /**
1346 * sb_start_pagefault - get write access to a superblock from a page fault
1347 * @sb: the super we write to
1348 *
1349 * When a process starts handling write page fault, it should embed the
1350 * operation into sb_start_pagefault() - sb_end_pagefault() pair to get
1351 * exclusion against file system freezing. This is needed since the page fault
1352 * is going to dirty a page. This function increments number of running page
1353 * faults preventing freezing. If the file system is already frozen, the
1354 * function waits until the file system is thawed.
1355 *
1356 * Since page fault freeze protection behaves as a lock, users have to preserve
1357 * ordering of freeze protection and other filesystem locks. It is advised to
1358 * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault
1359 * handling code implies lock dependency:
1360 *
1361 * mmap_sem
1362 * -> sb_start_pagefault
1363 */
1364 static inline void sb_start_pagefault(struct super_block *sb)
1365 {
1366 __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true); /* wait == true: may block until thawed */
1367 }
1368
1369 /*
1370 * sb_start_intwrite - get write access to a superblock for internal fs purposes
1371 * @sb: the super we write to
1372 *
1373 * This is the third level of protection against filesystem freezing. It is
1374 * free for use by a filesystem. The only requirement is that it must rank
1375 * below sb_start_pagefault.
1376 *
1377 * For example filesystem can call sb_start_intwrite() when starting a
1378 * transaction which somewhat eases handling of freezing for internal sources
1379 * of filesystem changes (internal fs threads, discarding preallocation on file
1380 * close, etc.).
1381 */
1382 static inline void sb_start_intwrite(struct super_block *sb)
1383 {
1384 __sb_start_write(sb, SB_FREEZE_FS, true); /* wait == true: may block until thawed */
1385 }
1386
1387
1388 extern bool inode_owner_or_capable(const struct inode *inode);
1389
1390 /*
1391 * VFS helper functions..
1392 */
1393 extern int vfs_create(struct inode *, struct dentry *, umode_t, bool);
1394 extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
1395 extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
1396 extern int vfs_symlink(struct inode *, struct dentry *, const char *);
1397 extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **);
1398 extern int vfs_rmdir(struct inode *, struct dentry *);
1399 extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
1400 extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
1401
1402 /*
1403 * VFS dentry helper functions.
1404 */
1405 extern void dentry_unhash(struct dentry *dentry);
1406
1407 /*
1408 * VFS file helper functions.
1409 */
1410 extern void inode_init_owner(struct inode *inode, const struct inode *dir,
1411 umode_t mode);
1412 /*
1413 * VFS FS_IOC_FIEMAP helper definitions.
1414 */
/*
 * State carried through a FIEMAP request; extents are reported into the
 * user-supplied array via fiemap_fill_next_extent() (declared below).
 */
1415 struct fiemap_extent_info {
1416 unsigned int fi_flags; /* Flags as passed from user */
1417 unsigned int fi_extents_mapped; /* Number of mapped extents */
1418 unsigned int fi_extents_max; /* Size of fiemap_extent array */
1419 struct fiemap_extent __user *fi_extents_start; /* Start of
1420 fiemap_extent array */
1421 };
1422 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
1423 u64 phys, u64 len, u32 flags);
1424 int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
1425
1426 /*
1427 * File types
1428 *
1429 * NOTE! These match bits 12..15 of stat.st_mode
1430 * (ie "(i_mode >> 12) & 15").
1431 */
1432 #define DT_UNKNOWN 0
1433 #define DT_FIFO 1
1434 #define DT_CHR 2
1435 #define DT_DIR 4
1436 #define DT_BLK 6
1437 #define DT_REG 8
1438 #define DT_LNK 10
1439 #define DT_SOCK 12
1440 #define DT_WHT 14
1441
1442 /*
1443 * This is the "filldir" function type, used by readdir() to let
1444 * the kernel specify what kind of dirent layout it wants to have.
1445 * This allows the kernel to read directories into kernel space or
1446 * to have different dirent layouts depending on the binary type.
1447 */
1448 typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
/* Readdir state handed to ->iterate(); actor is invoked once per dirent. */
1449 struct dir_context {
1450 const filldir_t actor; /* callback receiving each directory entry */
1451 loff_t pos; /* current offset within the directory */
1452 };
1453
1454 struct block_device_operations;
1455
1456 /* These macros are for out of kernel modules to test that
1457 * the kernel supports the unlocked_ioctl and compat_ioctl
1458 * fields in struct file_operations. */
1459 #define HAVE_COMPAT_IOCTL 1
1460 #define HAVE_UNLOCKED_IOCTL 1
1461
1462 struct iov_iter;
1463
/*
 * Method table for operations on an open file.  The owning module is
 * pinned together with the table via fops_get()/fops_put() (defined later
 * in this header); unimplemented operations are left NULL.
 */
1464 struct file_operations {
1465 struct module *owner;
1466 loff_t (*llseek) (struct file *, loff_t, int);
1467 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
1468 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
1469 ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
1470 ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
1471 ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
1472 ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
1473 int (*iterate) (struct file *, struct dir_context *);
1474 unsigned int (*poll) (struct file *, struct poll_table_struct *);
1475 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
1476 long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
1477 int (*mmap) (struct file *, struct vm_area_struct *);
1478 int (*open) (struct inode *, struct file *);
1479 int (*flush) (struct file *, fl_owner_t id);
1480 int (*release) (struct inode *, struct file *);
1481 int (*fsync) (struct file *, loff_t, loff_t, int datasync);
1482 int (*aio_fsync) (struct kiocb *, int datasync);
1483 int (*fasync) (int, struct file *, int);
1484 int (*lock) (struct file *, int, struct file_lock *);
1485 ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
1486 unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1487 int (*check_flags)(int);
1488 int (*flock) (struct file *, int, struct file_lock *);
1489 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
1490 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
1491 int (*setlease)(struct file *, long, struct file_lock **, void **);
1492 long (*fallocate)(struct file *file, int mode, loff_t offset,
1493 loff_t len);
1494 int (*show_fdinfo)(struct seq_file *m, struct file *f);
1495 };
1496
/*
 * Method table for operations on an inode: lookup/symlink traversal,
 * permission checks, namespace changes (create/link/rename/...), attribute
 * and xattr access.  Kept cacheline-aligned (see ____cacheline_aligned at
 * the closing brace).
 */
1497 struct inode_operations {
1498 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
1499 void * (*follow_link) (struct dentry *, struct nameidata *);
1500 int (*permission) (struct inode *, int);
1501 struct posix_acl * (*get_acl)(struct inode *, int);
1502
1503 int (*readlink) (struct dentry *, char __user *,int);
1504 void (*put_link) (struct dentry *, struct nameidata *, void *);
1505
1506 int (*create) (struct inode *,struct dentry *, umode_t, bool);
1507 int (*link) (struct dentry *,struct inode *,struct dentry *);
1508 int (*unlink) (struct inode *,struct dentry *);
1509 int (*symlink) (struct inode *,struct dentry *,const char *);
1510 int (*mkdir) (struct inode *,struct dentry *,umode_t);
1511 int (*rmdir) (struct inode *,struct dentry *);
1512 int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
1513 int (*rename) (struct inode *, struct dentry *,
1514 struct inode *, struct dentry *);
1515 int (*rename2) (struct inode *, struct dentry *,
1516 struct inode *, struct dentry *, unsigned int);
1517 int (*setattr) (struct dentry *, struct iattr *);
1518 int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
1519 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
1520 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
1521 ssize_t (*listxattr) (struct dentry *, char *, size_t);
1522 int (*removexattr) (struct dentry *, const char *);
1523 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
1524 u64 len);
1525 int (*update_time)(struct inode *, struct timespec *, int);
1526 int (*atomic_open)(struct inode *, struct dentry *,
1527 struct file *, unsigned open_flag,
1528 umode_t create_mode, int *opened);
1529 int (*tmpfile) (struct inode *, struct dentry *, umode_t);
1530 int (*set_acl)(struct inode *, struct posix_acl *, int);
1531 } ____cacheline_aligned;
1532
1533 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
1534 unsigned long nr_segs, unsigned long fast_segs,
1535 struct iovec *fast_pointer,
1536 struct iovec **ret_pointer);
1537
1538 extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
1539 extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
1540 extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
1541 unsigned long, loff_t *);
1542 extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
1543 unsigned long, loff_t *);
1544
/*
 * Method table a filesystem supplies for its superblock: inode allocation
 * and lifecycle, writeback/sync, freeze/thaw, statfs/remount/umount, and
 * the show_* hooks used for mount-table reporting.
 */
1545 struct super_operations {
1546 struct inode *(*alloc_inode)(struct super_block *sb);
1547 void (*destroy_inode)(struct inode *);
1548
1549 void (*dirty_inode) (struct inode *, int flags);
1550 int (*write_inode) (struct inode *, struct writeback_control *wbc);
1551 int (*drop_inode) (struct inode *);
1552 void (*evict_inode) (struct inode *);
1553 void (*put_super) (struct super_block *);
1554 int (*sync_fs)(struct super_block *sb, int wait);
1555 int (*freeze_fs) (struct super_block *);
1556 int (*unfreeze_fs) (struct super_block *);
1557 int (*statfs) (struct dentry *, struct kstatfs *);
1558 int (*remount_fs) (struct super_block *, int *, char *);
1559 void (*umount_begin) (struct super_block *);
1560
1561 int (*show_options)(struct seq_file *, struct dentry *);
1562 int (*show_devname)(struct seq_file *, struct dentry *);
1563 int (*show_path)(struct seq_file *, struct dentry *);
1564 int (*show_stats)(struct seq_file *, struct dentry *);
1565 #ifdef CONFIG_QUOTA
1566 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
1567 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
1568 #endif
1569 int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
1570 long (*nr_cached_objects)(struct super_block *, int);
1571 long (*free_cached_objects)(struct super_block *, long, int);
1572 };
1573
1574 /*
1575 * Inode flags - they have no relation to superblock flags now
1576 */
1577 #define S_SYNC 1 /* Writes are synced at once */
1578 #define S_NOATIME 2 /* Do not update access times */
1579 #define S_APPEND 4 /* Append-only file */
1580 #define S_IMMUTABLE 8 /* Immutable file */
1581 #define S_DEAD 16 /* removed, but still open directory */
1582 #define S_NOQUOTA 32 /* Inode is not counted to quota */
1583 #define S_DIRSYNC 64 /* Directory modifications are synchronous */
1584 #define S_NOCMTIME 128 /* Do not update file c/mtime */
1585 #define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */
1586 #define S_PRIVATE 512 /* Inode is fs-internal */
1587 #define S_IMA 1024 /* Inode has an associated IMA struct */
1588 #define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
1589 #define S_NOSEC 4096 /* no suid or xattr security attributes */
1590
1591 /*
1592 * Note that nosuid etc flags are inode-specific: setting some file-system
1593 * flags just means all the inodes inherit those flags by default. It might be
1594 * possible to override it selectively if you really wanted to with some
1595 * ioctl() that is not currently implemented.
1596 *
1597 * Exception: MS_RDONLY is always applied to the entire file system.
1598 *
1599 * Unfortunately, it is possible to change a filesystems flags with it mounted
1600 * with files in use. This means that all of the inodes will not have their
1601 * i_flags updated. Hence, i_flags no longer inherit the superblock mount
1602 * flags, so these have to be checked separately. -- rmk@arm.uk.linux.org
1603 */
1604 #define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg))
1605
1606 #define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY)
1607 #define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \
1608 ((inode)->i_flags & S_SYNC))
1609 #define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
1610 ((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
1611 #define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK)
1612 #define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
1613 #define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION)
1614
1615 #define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
1616 #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
1617 #define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
1618 #define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL)
1619
1620 #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
1621 #define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
1622 #define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
1623 #define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
1624 #define IS_IMA(inode) ((inode)->i_flags & S_IMA)
1625 #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
1626 #define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
1627
1628 /*
1629 * Inode state bits. Protected by inode->i_lock
1630 *
1631 * Three bits determine the dirty state of the inode, I_DIRTY_SYNC,
1632 * I_DIRTY_DATASYNC and I_DIRTY_PAGES.
1633 *
1634 * Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
1635 * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
1636 * various stages of removing an inode.
1637 *
1638 * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
1639 *
1640 * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
1641 * fdatasync(). i_atime is the usual cause.
1642 * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
1643 * these changes separately from I_DIRTY_SYNC so that we
1644 * don't have to write inode on fdatasync() when only
1645 * mtime has changed in it.
1646 * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
1647 * I_NEW Serves as both a mutex and completion notification.
1648 * New inodes set I_NEW. If two processes both create
1649 * the same inode, one of them will release its inode and
1650 * wait for I_NEW to be released before returning.
1651 * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
1652 * also cause waiting on I_NEW, without I_NEW actually
1653 * being set. find_inode() uses this to prevent returning
1654 * nearly-dead inodes.
1655 * I_WILL_FREE Must be set when calling write_inode_now() if i_count
1656 * is zero. I_FREEING must be set when I_WILL_FREE is
1657 * cleared.
1658 * I_FREEING Set when inode is about to be freed but still has dirty
1659 * pages or buffers attached or the inode itself is still
1660 * dirty.
1661 * I_CLEAR Added by clear_inode(). In this state the inode is
1662 * clean and can be destroyed. Inode keeps I_FREEING.
1663 *
1664 * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
1665 * prohibited for many purposes. iget() must wait for
1666 * the inode to be completely released, then create it
1667 * anew. Other functions will just ignore such inodes,
1668 * if appropriate. I_NEW is used for waiting.
1669 *
1670 * I_SYNC Writeback of inode is running. The bit is set during
1671 * data writeback, and cleared with a wakeup on the bit
1672 * address once it is done. The bit is also used to pin
1673 * the inode in memory for flusher thread.
1674 *
1675 * I_REFERENCED Marks the inode as recently referenced on the LRU list.
1676 *
1677 * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit().
1678 *
1679 * Q: What is the difference between I_WILL_FREE and I_FREEING?
1680 */
/* Inode state bits - semantics documented in the comment block above. */
#define I_DIRTY_SYNC		(1 << 0)
#define I_DIRTY_DATASYNC	(1 << 1)
#define I_DIRTY_PAGES		(1 << 2)
#define __I_NEW			3
#define I_NEW			(1 << __I_NEW)
#define I_WILL_FREE		(1 << 4)
#define I_FREEING		(1 << 5)
#define I_CLEAR			(1 << 6)
#define __I_SYNC		7
#define I_SYNC			(1 << __I_SYNC)
#define I_REFERENCED		(1 << 8)
#define __I_DIO_WAKEUP		9
/*
 * BUG FIX: this previously expanded to (1 << I_DIO_WAKEUP), a
 * self-referential macro - the preprocessor leaves the inner token
 * unexpanded, so any use fails to compile.  The shift count is the bit
 * number, __I_DIO_WAKEUP, matching the __I_NEW/__I_SYNC pattern above.
 */
#define I_DIO_WAKEUP		(1 << __I_DIO_WAKEUP)
#define I_LINKABLE		(1 << 10)

/* "Dirty in any respect" mask used by mark_inode_dirty(). */
#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
1697
1698 extern void __mark_inode_dirty(struct inode *, int);
/* Mark @inode dirty in all respects (I_DIRTY = sync | datasync | pages). */
1699 static inline void mark_inode_dirty(struct inode *inode)
1700 {
1701 __mark_inode_dirty(inode, I_DIRTY);
1702 }
1703
/*
 * Mark @inode dirty only at the I_DIRTY_SYNC level, i.e. not in a way
 * that requires write-out on fdatasync() (see the flag comments above).
 */
1704 static inline void mark_inode_dirty_sync(struct inode *inode)
1705 {
1706 __mark_inode_dirty(inode, I_DIRTY_SYNC);
1707 }
1708
1709 extern void inc_nlink(struct inode *inode);
1710 extern void drop_nlink(struct inode *inode);
1711 extern void clear_nlink(struct inode *inode);
1712 extern void set_nlink(struct inode *inode, unsigned int nlink);
1713
/* Bump the link count and mark the inode dirty so the change is written back. */
1714 static inline void inode_inc_link_count(struct inode *inode)
1715 {
1716 inc_nlink(inode);
1717 mark_inode_dirty(inode);
1718 }
1719
/* Drop the link count and mark the inode dirty so the change is written back. */
1720 static inline void inode_dec_link_count(struct inode *inode)
1721 {
1722 drop_nlink(inode);
1723 mark_inode_dirty(inode);
1724 }
1725
1726 /**
1727 * inode_inc_iversion - increments i_version
1728 * @inode: inode that need to be updated
1729 *
1730 * Every time the inode is modified, the i_version field will be incremented.
1731 * The filesystem has to be mounted with i_version flag
1732 */
1733
1734 static inline void inode_inc_iversion(struct inode *inode)
1735 {
1736 spin_lock(&inode->i_lock);
1737 inode->i_version++; /* i_lock serializes concurrent increments */
1738 spin_unlock(&inode->i_lock);
1739 }
1740
1741 enum file_time_flags {
1742 S_ATIME = 1,
1743 S_MTIME = 2,
1744 S_CTIME = 4,
1745 S_VERSION = 8,
1746 };
1747
1748 extern void touch_atime(const struct path *);
1749 static inline void file_accessed(struct file *file)
1750 {
1751 if (!(file->f_flags & O_NOATIME))
1752 touch_atime(&file->f_path);
1753 }
1754
1755 int sync_inode(struct inode *inode, struct writeback_control *wbc);
1756 int sync_inode_metadata(struct inode *inode, int wait);
1757
/*
 * Describes one filesystem type, registered via register_filesystem()
 * (declared below).  mount() instantiates a superblock and returns its
 * root dentry; kill_sb() tears one down.  The lock_class_key members give
 * each filesystem type its own lock classes (presumably for lockdep
 * annotation - confirm against fs/super.c usage).
 */
1758 struct file_system_type {
1759 const char *name;
1760 int fs_flags;
1761 #define FS_REQUIRES_DEV 1
1762 #define FS_BINARY_MOUNTDATA 2
1763 #define FS_HAS_SUBTYPE 4
1764 #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
1765 #define FS_USERNS_DEV_MOUNT 16 /* A userns mount does not imply MNT_NODEV */
1766 #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
1767 struct dentry *(*mount) (struct file_system_type *, int,
1768 const char *, void *);
1769 void (*kill_sb) (struct super_block *);
1770 struct module *owner;
1771 struct file_system_type * next;
1772 struct hlist_head fs_supers;
1773
1774 struct lock_class_key s_lock_key;
1775 struct lock_class_key s_umount_key;
1776 struct lock_class_key s_vfs_rename_key;
1777 struct lock_class_key s_writers_key[SB_FREEZE_LEVELS];
1778
1779 struct lock_class_key i_lock_key;
1780 struct lock_class_key i_mutex_key;
1781 struct lock_class_key i_mutex_dir_key;
1782 };
1783
1784 #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
1785
1786 extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
1787 void *data, int (*fill_super)(struct super_block *, void *, int));
1788 extern struct dentry *mount_bdev(struct file_system_type *fs_type,
1789 int flags, const char *dev_name, void *data,
1790 int (*fill_super)(struct super_block *, void *, int));
1791 extern struct dentry *mount_single(struct file_system_type *fs_type,
1792 int flags, void *data,
1793 int (*fill_super)(struct super_block *, void *, int));
1794 extern struct dentry *mount_nodev(struct file_system_type *fs_type,
1795 int flags, void *data,
1796 int (*fill_super)(struct super_block *, void *, int));
1797 extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
1798 void generic_shutdown_super(struct super_block *sb);
1799 void kill_block_super(struct super_block *sb);
1800 void kill_anon_super(struct super_block *sb);
1801 void kill_litter_super(struct super_block *sb);
1802 void deactivate_super(struct super_block *sb);
1803 void deactivate_locked_super(struct super_block *sb);
1804 int set_anon_super(struct super_block *s, void *data);
1805 int get_anon_bdev(dev_t *);
1806 void free_anon_bdev(dev_t);
1807 struct super_block *sget(struct file_system_type *type,
1808 int (*test)(struct super_block *,void *),
1809 int (*set)(struct super_block *,void *),
1810 int flags, void *data);
1811 extern struct dentry *mount_pseudo(struct file_system_type *, char *,
1812 const struct super_operations *ops,
1813 const struct dentry_operations *dops,
1814 unsigned long);
1815
1816 /* Alas, no aliases. Too much hassle with bringing module.h everywhere */
1817 #define fops_get(fops) \
1818 (((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
1819 #define fops_put(fops) \
1820 do { if (fops) module_put((fops)->owner); } while(0)
1821 /*
1822 * This one is to be used *ONLY* from ->open() instances.
1823 * fops must be non-NULL, pinned down *and* module dependencies
1824 * should be sufficient to pin the caller down as well.
1825 */
1826 #define replace_fops(f, fops) \
1827 do { \
1828 struct file *__file = (f); \
1829 fops_put(__file->f_op); \
1830 BUG_ON(!(__file->f_op = (fops))); \
1831 } while(0)
1832
1833 extern int register_filesystem(struct file_system_type *);
1834 extern int unregister_filesystem(struct file_system_type *);
1835 extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
1836 #define kern_mount(type) kern_mount_data(type, NULL)
1837 extern void kern_unmount(struct vfsmount *mnt);
1838 extern int may_umount_tree(struct vfsmount *);
1839 extern int may_umount(struct vfsmount *);
1840 extern long do_mount(const char *, const char __user *,
1841 const char *, unsigned long, void *);
1842 extern struct vfsmount *collect_mounts(struct path *);
1843 extern void drop_collected_mounts(struct vfsmount *);
1844 extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
1845 struct vfsmount *);
1846 extern int vfs_statfs(struct path *, struct kstatfs *);
1847 extern int user_statfs(const char __user *, struct kstatfs *);
1848 extern int fd_statfs(int, struct kstatfs *);
1849 extern int vfs_ustat(dev_t, struct kstatfs *);
1850 extern int freeze_super(struct super_block *super);
1851 extern int thaw_super(struct super_block *super);
1852 extern bool our_mnt(struct vfsmount *mnt);
1853 extern bool fs_fully_visible(struct file_system_type *);
1854
1855 extern int current_umask(void);
1856
1857 extern void ihold(struct inode * inode);
1858 extern void iput(struct inode *);
1859
/* Return the inode behind an open file (f->f_inode). */
1860 static inline struct inode *file_inode(const struct file *f)
1861 {
1862 return f->f_inode;
1863 }
1864
1865 /* /sys/fs */
1866 extern struct kobject *fs_kobj;
1867
1868 #define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK)
1869
1870 #define FLOCK_VERIFY_READ 1
1871 #define FLOCK_VERIFY_WRITE 2
1872
1873 #ifdef CONFIG_FILE_LOCKING
1874 extern int locks_mandatory_locked(struct file *);
1875 extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
1876
1877 /*
1878 * Candidates for mandatory locking have the setgid bit set
1879 * but no group execute bit - an otherwise meaningless combination.
1880 */
1881
1882 static inline int __mandatory_lock(struct inode *ino)
1883 {
1884 return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
1885 }
1886
1887 /*
1888 * ... and these candidates should be on MS_MANDLOCK mounted fs,
1889 * otherwise these will be advisory locks
1890 */
1891
1892 static inline int mandatory_lock(struct inode *ino)
1893 {
1894 return IS_MANDLOCK(ino) && __mandatory_lock(ino); /* mount option AND mode bits */
1895 }
1896
/*
 * Check an open file for conflicting mandatory locks.  A plain 0 is
 * returned when the inode is not subject to mandatory locking at all;
 * otherwise the verdict comes from locks_mandatory_locked().
 */
static inline int locks_verify_locked(struct file *file)
{
	return mandatory_lock(file_inode(file))
		? locks_mandatory_locked(file)
		: 0;
}
1903
1904 static inline int locks_verify_truncate(struct inode *inode,
1905 struct file *filp,
1906 loff_t size)
1907 {
1908 if (inode->i_flock && mandatory_lock(inode))
1909 return locks_mandatory_area(
1910 FLOCK_VERIFY_WRITE, inode, filp,
1911 size < inode->i_size ? size : inode->i_size,
1912 (size < inode->i_size ? inode->i_size - size
1913 : size - inode->i_size)
1914 );
1915 return 0;
1916 }
1917
/*
 * break_lease - break an FL_LEASE lease on @inode, if one is set.
 * Returns 0 immediately when inode->i_flock is NULL; otherwise returns
 * whatever __break_lease() reports.
 */
1918 static inline int break_lease(struct inode *inode, unsigned int mode)
1919 {
1920 /*
1921 * Since this check is lockless, we must ensure that any refcounts
1922 * taken are done before checking inode->i_flock. Otherwise, we could
1923 * end up racing with tasks trying to set a new lease on this file.
1924 */
1925 smp_mb();
1926 if (inode->i_flock)
1927 return __break_lease(inode, mode, FL_LEASE);
1928 return 0;
1929 }
1930
/*
 * break_deleg - break an FL_DELEG delegation on @inode, if one is set.
 * Same lockless pattern as break_lease() above, but with FL_DELEG
 * semantics; returns 0 when inode->i_flock is NULL.
 */
1931 static inline int break_deleg(struct inode *inode, unsigned int mode)
1932 {
1933 /*
1934 * Since this check is lockless, we must ensure that any refcounts
1935 * taken are done before checking inode->i_flock. Otherwise, we could
1936 * end up racing with tasks trying to set a new lease on this file.
1937 */
1938 smp_mb();
1939 if (inode->i_flock)
1940 return __break_lease(inode, mode, FL_DELEG);
1941 return 0;
1942 }
1943
1944 static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
1945 {
1946 int ret;
1947
1948 ret = break_deleg(inode, O_WRONLY|O_NONBLOCK);
1949 if (ret == -EWOULDBLOCK && delegated_inode) {
1950 *delegated_inode = inode;
1951 ihold(inode);
1952 }
1953 return ret;
1954 }
1955
/*
 * break_deleg_wait - finish the break started by try_break_deleg():
 * retry without O_NONBLOCK (so this may block), then drop the inode
 * reference ihold()ed there and clear the caller's pointer.
 */
1956 static inline int break_deleg_wait(struct inode **delegated_inode)
1957 {
1958 int ret;
1959
1960 ret = break_deleg(*delegated_inode, O_WRONLY);
1961 iput(*delegated_inode);
1962 *delegated_inode = NULL;
1963 return ret;
1964 }
1965
1966 #else /* !CONFIG_FILE_LOCKING */
/*
 * !CONFIG_FILE_LOCKING: mandatory locking and lease/delegation breaking
 * compile away to successful no-ops.  break_deleg_wait() must never be
 * reached in this configuration, hence the BUG().
 */
1967 static inline int locks_mandatory_locked(struct file *file)
1968 {
1969 return 0;
1970 }
1971
1972 static inline int locks_mandatory_area(int rw, struct inode *inode,
1973 struct file *filp, loff_t offset,
1974 size_t count)
1975 {
1976 return 0;
1977 }
1978
1979 static inline int __mandatory_lock(struct inode *inode)
1980 {
1981 return 0;
1982 }
1983
1984 static inline int mandatory_lock(struct inode *inode)
1985 {
1986 return 0;
1987 }
1988
1989 static inline int locks_verify_locked(struct file *file)
1990 {
1991 return 0;
1992 }
1993
1994 static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
1995 size_t size)
1996 {
1997 return 0;
1998 }
1999
2000 static inline int break_lease(struct inode *inode, unsigned int mode)
2001 {
2002 return 0;
2003 }
2004
2005 static inline int break_deleg(struct inode *inode, unsigned int mode)
2006 {
2007 return 0;
2008 }
2009
2010 static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
2011 {
2012 return 0;
2013 }
2014
2015 static inline int break_deleg_wait(struct inode **delegated_inode)
2016 {
2017 BUG();
2018 return 0;
2019 }
2020
2021 #endif /* CONFIG_FILE_LOCKING */
2022
2023 /* fs/open.c */
2024 struct audit_names;
/* Kernel-side copy of a user path, returned by getname()/getname_kernel() below. */
2025 struct filename {
2026 const char *name; /* pointer to actual string */
2027 const __user char *uptr; /* original userland pointer */
2028 struct audit_names *aname; /* audit bookkeeping (opaque here) */
2029 bool separate; /* should "name" be freed? */
2030 };
2031
2032 extern long vfs_truncate(struct path *, loff_t);
2033 extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
2034 struct file *filp);
2035 extern int do_fallocate(struct file *file, int mode, loff_t offset,
2036 loff_t len);
2037 extern long do_sys_open(int dfd, const char __user *filename, int flags,
2038 umode_t mode);
2039 extern struct file *file_open_name(struct filename *, int, umode_t);
2040 extern struct file *filp_open(const char *, int, umode_t);
2041 extern struct file *file_open_root(struct dentry *, struct vfsmount *,
2042 const char *, int);
2043 extern struct file * dentry_open(const struct path *, int, const struct cred *);
2044 extern int filp_close(struct file *, fl_owner_t id);
2045
2046 extern struct filename *getname(const char __user *);
2047 extern struct filename *getname_kernel(const char *);
2048
2049 enum {
2050 FILE_CREATED = 1,
2051 FILE_OPENED = 2
2052 };
2053 extern int finish_open(struct file *file, struct dentry *dentry,
2054 int (*open)(struct inode *, struct file *),
2055 int *opened);
2056 extern int finish_no_open(struct file *file, struct dentry *dentry);
2057
2058 /* fs/ioctl.c */
2059
2060 extern int ioctl_preallocate(struct file *filp, void __user *argp);
2061
2062 /* fs/dcache.c */
2063 extern void __init vfs_caches_init_early(void);
2064 extern void __init vfs_caches_init(unsigned long);
2065
2066 extern struct kmem_cache *names_cachep;
2067
2068 extern void final_putname(struct filename *name);
2069
2070 #define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
2071 #define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
2072 #ifndef CONFIG_AUDITSYSCALL
2073 #define putname(name) final_putname(name)
2074 #else
2075 extern void putname(struct filename *name);
2076 #endif
2077
2078 #ifdef CONFIG_BLOCK
2079 extern int register_blkdev(unsigned int, const char *);
2080 extern void unregister_blkdev(unsigned int, const char *);
2081 extern struct block_device *bdget(dev_t);
2082 extern struct block_device *bdgrab(struct block_device *bdev);
2083 extern void bd_set_size(struct block_device *, loff_t size);
2084 extern void bd_forget(struct inode *inode);
2085 extern void bdput(struct block_device *);
2086 extern void invalidate_bdev(struct block_device *);
2087 extern void iterate_bdevs(void (*)(struct block_device *, void *), void *);
2088 extern int sync_blockdev(struct block_device *bdev);
2089 extern void kill_bdev(struct block_device *);
2090 extern struct super_block *freeze_bdev(struct block_device *);
2091 extern void emergency_thaw_all(void);
2092 extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
2093 extern int fsync_bdev(struct block_device *);
2094 extern int sb_is_blkdev_sb(struct super_block *sb);
2095 #else
/*
 * !CONFIG_BLOCK: no-op / trivial-success stubs so generic VFS code can
 * call block-device helpers unconditionally, without #ifdefs at every
 * call site.
 */
static inline void bd_forget(struct inode *inode) {}
static inline int sync_blockdev(struct block_device *bdev) { return 0; }
static inline void kill_bdev(struct block_device *bdev) {}
static inline void invalidate_bdev(struct block_device *bdev) {}

/* No block layer: there is never a frozen-bdev superblock to hand back. */
static inline struct super_block *freeze_bdev(struct block_device *sb)
{
	return NULL;
}

static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	return 0;
}

/* Nothing to iterate over without a block layer. */
static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg)
{
}

static inline int sb_is_blkdev_sb(struct super_block *sb)
{
	return 0;
}
2119 #endif
2120 extern int sync_filesystem(struct super_block *);
2121 extern const struct file_operations def_blk_fops;
2122 extern const struct file_operations def_chr_fops;
2123 extern const struct file_operations bad_sock_fops;
2124 #ifdef CONFIG_BLOCK
2125 extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
2126 extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
2127 extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
2128 extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
2129 extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
2130 void *holder);
2131 extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
2132 void *holder);
2133 extern void blkdev_put(struct block_device *bdev, fmode_t mode);
2134 #ifdef CONFIG_SYSFS
2135 extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
2136 extern void bd_unlink_disk_holder(struct block_device *bdev,
2137 struct gendisk *disk);
2138 #else
/* !CONFIG_SYSFS: holder links are not represented; succeed silently. */
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
2148 #endif
2149 #endif
2150
2151 /* fs/char_dev.c */
2152 #define CHRDEV_MAJOR_HASH_SIZE 255
2153 extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
2154 extern int register_chrdev_region(dev_t, unsigned, const char *);
2155 extern int __register_chrdev(unsigned int major, unsigned int baseminor,
2156 unsigned int count, const char *name,
2157 const struct file_operations *fops);
2158 extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
2159 unsigned int count, const char *name);
2160 extern void unregister_chrdev_region(dev_t, unsigned);
2161 extern void chrdev_show(struct seq_file *,off_t);
2162
/*
 * Legacy whole-major registration: claims all 256 minors (0..255) of
 * @major in one go.  Thin wrapper around __register_chrdev(); the
 * return value is passed through unchanged.
 */
static inline int register_chrdev(unsigned int major, const char *name,
				  const struct file_operations *fops)
{
	return __register_chrdev(major, 0, 256, name, fops);
}

/* Release what register_chrdev() claimed (minors 0..255 of @major). */
static inline void unregister_chrdev(unsigned int major, const char *name)
{
	__unregister_chrdev(major, 0, 256, name);
}
2173
2174 /* fs/block_dev.c */
2175 #define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
2176 #define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
2177
2178 #ifdef CONFIG_BLOCK
2179 #define BLKDEV_MAJOR_HASH_SIZE 255
2180 extern const char *__bdevname(dev_t, char *buffer);
2181 extern const char *bdevname(struct block_device *bdev, char *buffer);
2182 extern struct block_device *lookup_bdev(const char *);
2183 extern void blkdev_show(struct seq_file *,off_t);
2184
2185 #else
2186 #define BLKDEV_MAJOR_HASH_SIZE 0
2187 #endif
2188
2189 extern void init_special_inode(struct inode *, umode_t, dev_t);
2190
2191 /* Invalid inode operations -- fs/bad_inode.c */
2192 extern void make_bad_inode(struct inode *);
2193 extern int is_bad_inode(struct inode *);
2194
2195 #ifdef CONFIG_BLOCK
2196 /*
2197 * return READ, READA, or WRITE
2198 */
2199 #define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | RWA_MASK))
2200
2201 /*
2202 * return data direction, READ or WRITE
2203 */
2204 #define bio_data_dir(bio) ((bio)->bi_rw & 1)
2205
2206 extern void check_disk_size_change(struct gendisk *disk,
2207 struct block_device *bdev);
2208 extern int revalidate_disk(struct gendisk *);
2209 extern int check_disk_change(struct block_device *);
2210 extern int __invalidate_device(struct block_device *, bool);
2211 extern int invalidate_partition(struct gendisk *, int);
2212 #endif
2213 unsigned long invalidate_mapping_pages(struct address_space *mapping,
2214 pgoff_t start, pgoff_t end);
2215
/*
 * Drop the entire pagecache (range 0..-1) of an inode whose contents
 * may have changed behind our back — the name suggests remote/network
 * filesystems; confirm against callers.  Only regular files,
 * directories and symlinks are touched.
 */
static inline void invalidate_remote_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode))
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
2222 extern int invalidate_inode_pages2(struct address_space *mapping);
2223 extern int invalidate_inode_pages2_range(struct address_space *mapping,
2224 pgoff_t start, pgoff_t end);
2225 extern int write_inode_now(struct inode *, int);
2226 extern int filemap_fdatawrite(struct address_space *);
2227 extern int filemap_flush(struct address_space *);
2228 extern int filemap_fdatawait(struct address_space *);
2229 extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
2230 loff_t lend);
2231 extern int filemap_write_and_wait(struct address_space *mapping);
2232 extern int filemap_write_and_wait_range(struct address_space *mapping,
2233 loff_t lstart, loff_t lend);
2234 extern int __filemap_fdatawrite_range(struct address_space *mapping,
2235 loff_t start, loff_t end, int sync_mode);
2236 extern int filemap_fdatawrite_range(struct address_space *mapping,
2237 loff_t start, loff_t end);
2238
2239 extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
2240 int datasync);
2241 extern int vfs_fsync(struct file *file, int datasync);
/*
 * Post-write sync helper: a no-op unless the write must reach stable
 * storage (file opened with O_DSYNC, or the inode is marked IS_SYNC).
 * When it does sync, the datasync argument is 1 (data only) unless
 * full O_SYNC semantics were requested via __O_SYNC.
 */
static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count)
{
	if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
		return 0;
	return vfs_fsync_range(file, pos, pos + count - 1,
			       (file->f_flags & __O_SYNC) ? 0 : 1);
}
2249 extern void emergency_sync(void);
2250 extern void emergency_remount(void);
2251 #ifdef CONFIG_BLOCK
2252 extern sector_t bmap(struct inode *, sector_t);
2253 #endif
2254 extern int notify_change(struct dentry *, struct iattr *, struct inode **);
2255 extern int inode_permission(struct inode *, int);
2256 extern int generic_permission(struct inode *, int);
2257
/*
 * True when any execute bit (user/group/other) is set, or for
 * directories (where the x bits mean search permission).
 */
static inline bool execute_ok(struct inode *inode)
{
	return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
}
2262
/*
 * Freeze protection for writes: regular files take the SB_FREEZE_WRITE
 * level of their superblock before modifying data; other file types
 * are exempt.
 */
static inline void file_start_write(struct file *file)
{
	if (!S_ISREG(file_inode(file)->i_mode))
		return;
	__sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
}

/* Non-blocking variant; trivially succeeds for non-regular files. */
static inline bool file_start_write_trylock(struct file *file)
{
	if (!S_ISREG(file_inode(file)->i_mode))
		return true;
	return __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, false);
}

/* Pairs with file_start_write()/file_start_write_trylock(). */
static inline void file_end_write(struct file *file)
{
	if (!S_ISREG(file_inode(file)->i_mode))
		return;
	__sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
}
2283
2284 /*
2285 * get_write_access() gets write permission for a file.
2286 * put_write_access() releases this write permission.
2287 * This is used for regular files.
2288 * We cannot support write (and maybe mmap read-write shared) accesses and
2289 * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode
2290 * can have the following values:
2291 * 0: no writers, no VM_DENYWRITE mappings
2292 * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist
2293 * > 0: (i_writecount) users are writing to the file.
2294 *
2295 * Normally we operate on that counter with atomic_{inc,dec} and it's safe
2296 * except for the cases where we don't hold i_writecount yet. Then we need to
2297 * use {get,deny}_write_access() - these functions check the sign and refuse
2298 * to do the change if sign is wrong.
2299 */
/* Take write access; fails with -ETXTBSY while a deny (< 0) is held. */
static inline int get_write_access(struct inode *inode)
{
	return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -ETXTBSY;
}
/* Deny writers; fails with -ETXTBSY while someone is writing (> 0). */
static inline int deny_write_access(struct file *file)
{
	struct inode *inode = file_inode(file);
	return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -ETXTBSY;
}
/* Release one get_write_access() reference. */
static inline void put_write_access(struct inode * inode)
{
	atomic_dec(&inode->i_writecount);
}
/* Undo deny_write_access(); tolerates a NULL file. */
static inline void allow_write_access(struct file *file)
{
	if (file)
		atomic_inc(&file_inode(file)->i_writecount);
}
static inline bool inode_is_open_for_write(const struct inode *inode)
{
	return atomic_read(&inode->i_writecount) > 0;
}
2322
2323 #ifdef CONFIG_IMA
/* i_readcount is only maintained when IMA needs it. */
static inline void i_readcount_dec(struct inode *inode)
{
	/*
	 * A zero count here means an unbalanced inc/dec pair.
	 * NOTE(review): the read-then-dec is not a single atomic op, so
	 * this check is advisory under concurrency — matches the code
	 * as written.
	 */
	BUG_ON(!atomic_read(&inode->i_readcount));
	atomic_dec(&inode->i_readcount);
}
static inline void i_readcount_inc(struct inode *inode)
{
	atomic_inc(&inode->i_readcount);
}
#else
/* !CONFIG_IMA: the read count is not tracked; these are no-ops. */
static inline void i_readcount_dec(struct inode *inode)
{
	return;
}
static inline void i_readcount_inc(struct inode *inode)
{
	return;
}
2342 #endif
2343 extern int do_pipe_flags(int *, int);
2344
2345 extern int kernel_read(struct file *, loff_t, char *, unsigned long);
2346 extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t);
2347 extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
2348 extern struct file * open_exec(const char *);
2349
2350 /* fs/dcache.c -- generic fs support functions */
2351 extern int is_subdir(struct dentry *, struct dentry *);
2352 extern int path_is_under(struct path *, struct path *);
2353
2354 #include <linux/err.h>
2355
2356 /* needed for stackable file system support */
2357 extern loff_t default_llseek(struct file *file, loff_t offset, int whence);
2358
2359 extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
2360
2361 extern int inode_init_always(struct super_block *, struct inode *);
2362 extern void inode_init_once(struct inode *);
2363 extern void address_space_init_once(struct address_space *mapping);
2364 extern struct inode * igrab(struct inode *);
2365 extern ino_t iunique(struct super_block *, ino_t);
2366 extern int inode_needs_sync(struct inode *inode);
2367 extern int generic_delete_inode(struct inode *inode);
/*
 * Returns nonzero when the inode has no remaining links or has been
 * removed from the inode hash (presumably the default ->drop_inode
 * test — confirm against fs/inode.c callers).
 */
static inline int generic_drop_inode(struct inode *inode)
{
	return !inode->i_nlink || inode_unhashed(inode);
}
2372
2373 extern struct inode *ilookup5_nowait(struct super_block *sb,
2374 unsigned long hashval, int (*test)(struct inode *, void *),
2375 void *data);
2376 extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
2377 int (*test)(struct inode *, void *), void *data);
2378 extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
2379
2380 extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
2381 extern struct inode * iget_locked(struct super_block *, unsigned long);
2382 extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
2383 extern int insert_inode_locked(struct inode *);
2384 #ifdef CONFIG_DEBUG_LOCK_ALLOC
2385 extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
2386 #else
/* !CONFIG_DEBUG_LOCK_ALLOC stub: no lockdep class to annotate. */
static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { }
2388 #endif
2389 extern void unlock_new_inode(struct inode *);
2390 extern unsigned int get_next_ino(void);
2391
2392 extern void __iget(struct inode * inode);
2393 extern void iget_failed(struct inode *);
2394 extern void clear_inode(struct inode *);
2395 extern void __destroy_inode(struct inode *);
2396 extern struct inode *new_inode_pseudo(struct super_block *sb);
2397 extern struct inode *new_inode(struct super_block *sb);
2398 extern void free_inode_nonrcu(struct inode *inode);
2399 extern int should_remove_suid(struct dentry *);
2400 extern int file_remove_suid(struct file *);
2401
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
/* Hash the inode under its own inode number. */
static inline void insert_inode_hash(struct inode *inode)
{
	__insert_inode_hash(inode, inode->i_ino);
}

extern void __remove_inode_hash(struct inode *);
/* Unhash the inode, skipping the work when it is already unhashed. */
static inline void remove_inode_hash(struct inode *inode)
{
	if (!inode_unhashed(inode))
		__remove_inode_hash(inode);
}
2414
2415 extern void inode_sb_list_add(struct inode *inode);
2416
2417 #ifdef CONFIG_BLOCK
2418 extern void submit_bio(int, struct bio *);
2419 extern int bdev_read_only(struct block_device *);
2420 #endif
2421 extern int set_blocksize(struct block_device *, int);
2422 extern int sb_set_blocksize(struct super_block *, int);
2423 extern int sb_min_blocksize(struct super_block *, int);
2424
2425 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
2426 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
2427 extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
2428 unsigned long size, pgoff_t pgoff);
2429 int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
2430 extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
2431 extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
2432 extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
2433 extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t);
2434 extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
2435 extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
2436 extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
2437 extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
2438 extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
2439
2440 /* fs/block_dev.c */
2441 extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
2442 extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
2443 int datasync);
2444 extern void block_sync_page(struct page *page);
2445
2446 /* fs/splice.c */
2447 extern ssize_t generic_file_splice_read(struct file *, loff_t *,
2448 struct pipe_inode_info *, size_t, unsigned int);
2449 extern ssize_t default_file_splice_read(struct file *, loff_t *,
2450 struct pipe_inode_info *, size_t, unsigned int);
2451 extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
2452 struct file *, loff_t *, size_t, unsigned int);
2453 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
2454 struct file *out, loff_t *, size_t len, unsigned int flags);
2455
2456 extern void
2457 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
2458 extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
2459 extern loff_t no_llseek(struct file *file, loff_t offset, int whence);
2460 extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
2461 extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
2462 extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
2463 int whence, loff_t maxsize, loff_t eof);
2464 extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
2465 int whence, loff_t size);
2466 extern int generic_file_open(struct inode * inode, struct file * filp);
2467 extern int nonseekable_open(struct inode * inode, struct file * filp);
2468
2469 #ifdef CONFIG_FS_XIP
2470 extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len,
2471 loff_t *ppos);
2472 extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
2473 extern ssize_t xip_file_write(struct file *filp, const char __user *buf,
2474 size_t len, loff_t *ppos);
2475 extern int xip_truncate_page(struct address_space *mapping, loff_t from);
2476 #else
/* !CONFIG_FS_XIP: no XIP mapping to zero a partial page in; succeed. */
static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
{
	return 0;
}
2481 #endif
2482
2483 #ifdef CONFIG_BLOCK
2484 typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
2485 loff_t file_offset);
2486
2487 enum {
2488 /* need locking between buffered and direct access */
2489 DIO_LOCKING = 0x01,
2490
2491 /* filesystem does not support filling holes */
2492 DIO_SKIP_HOLES = 0x02,
2493
2494 /* filesystem can handle aio writes beyond i_size */
2495 DIO_ASYNC_EXTEND = 0x04,
2496 };
2497
2498 void dio_end_io(struct bio *bio, int error);
2499
2500 ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
2501 struct block_device *bdev, struct iov_iter *iter, loff_t offset,
2502 get_block_t get_block, dio_iodone_t end_io,
2503 dio_submit_t submit_io, int flags);
2504
/*
 * Common-case wrapper for __blockdev_direct_IO(): direct I/O against
 * the filesystem's own backing device (inode->i_sb->s_bdev), with no
 * custom end_io or submit_io hooks, using DIO_LOCKING | DIO_SKIP_HOLES.
 */
static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
	struct inode *inode, struct iov_iter *iter, loff_t offset,
	get_block_t get_block)
{
	return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
				    offset, get_block, NULL, NULL,
				    DIO_LOCKING | DIO_SKIP_HOLES);
}
2513 #endif
2514
2515 void inode_dio_wait(struct inode *inode);
2516 void inode_dio_done(struct inode *inode);
2517
2518 extern void inode_set_flags(struct inode *inode, unsigned int flags,
2519 unsigned int mask);
2520
2521 extern const struct file_operations generic_ro_fops;
2522
2523 #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
2524
2525 extern int readlink_copy(char __user *, int, const char *);
2526 extern int page_readlink(struct dentry *, char __user *, int);
2527 extern void *page_follow_link_light(struct dentry *, struct nameidata *);
2528 extern void page_put_link(struct dentry *, struct nameidata *, void *);
2529 extern int __page_symlink(struct inode *inode, const char *symname, int len,
2530 int nofs);
2531 extern int page_symlink(struct inode *inode, const char *symname, int len);
2532 extern const struct inode_operations page_symlink_inode_operations;
2533 extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
2534 extern int generic_readlink(struct dentry *, char __user *, int);
2535 extern void generic_fillattr(struct inode *, struct kstat *);
2536 int vfs_getattr_nosec(struct path *path, struct kstat *stat);
2537 extern int vfs_getattr(struct path *, struct kstat *);
2538 void __inode_add_bytes(struct inode *inode, loff_t bytes);
2539 void inode_add_bytes(struct inode *inode, loff_t bytes);
2540 void __inode_sub_bytes(struct inode *inode, loff_t bytes);
2541 void inode_sub_bytes(struct inode *inode, loff_t bytes);
2542 loff_t inode_get_bytes(struct inode *inode);
2543 void inode_set_bytes(struct inode *inode, loff_t bytes);
2544
2545 extern int vfs_readdir(struct file *, filldir_t, void *);
2546 extern int iterate_dir(struct file *, struct dir_context *);
2547
2548 extern int vfs_stat(const char __user *, struct kstat *);
2549 extern int vfs_lstat(const char __user *, struct kstat *);
2550 extern int vfs_fstat(unsigned int, struct kstat *);
2551 extern int vfs_fstatat(int , const char __user *, struct kstat *, int);
2552
2553 extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
2554 unsigned long arg);
2555 extern int __generic_block_fiemap(struct inode *inode,
2556 struct fiemap_extent_info *fieinfo,
2557 loff_t start, loff_t len,
2558 get_block_t *get_block);
2559 extern int generic_block_fiemap(struct inode *inode,
2560 struct fiemap_extent_info *fieinfo, u64 start,
2561 u64 len, get_block_t *get_block);
2562
2563 extern void get_filesystem(struct file_system_type *fs);
2564 extern void put_filesystem(struct file_system_type *fs);
2565 extern struct file_system_type *get_fs_type(const char *name);
2566 extern struct super_block *get_super(struct block_device *);
2567 extern struct super_block *get_super_thawed(struct block_device *);
2568 extern struct super_block *get_active_super(struct block_device *bdev);
2569 extern void drop_super(struct super_block *sb);
2570 extern void iterate_supers(void (*)(struct super_block *, void *), void *);
2571 extern void iterate_supers_type(struct file_system_type *,
2572 void (*)(struct super_block *, void *), void *);
2573
2574 extern int dcache_dir_open(struct inode *, struct file *);
2575 extern int dcache_dir_close(struct inode *, struct file *);
2576 extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
2577 extern int dcache_readdir(struct file *, struct dir_context *);
2578 extern int simple_setattr(struct dentry *, struct iattr *);
2579 extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
2580 extern int simple_statfs(struct dentry *, struct kstatfs *);
2581 extern int simple_open(struct inode *inode, struct file *file);
2582 extern int simple_link(struct dentry *, struct inode *, struct dentry *);
2583 extern int simple_unlink(struct inode *, struct dentry *);
2584 extern int simple_rmdir(struct inode *, struct dentry *);
2585 extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
2586 extern int noop_fsync(struct file *, loff_t, loff_t, int);
2587 extern int simple_empty(struct dentry *);
2588 extern int simple_readpage(struct file *file, struct page *page);
2589 extern int simple_write_begin(struct file *file, struct address_space *mapping,
2590 loff_t pos, unsigned len, unsigned flags,
2591 struct page **pagep, void **fsdata);
2592 extern int simple_write_end(struct file *file, struct address_space *mapping,
2593 loff_t pos, unsigned len, unsigned copied,
2594 struct page *page, void *fsdata);
2595 extern int always_delete_dentry(const struct dentry *);
2596 extern struct inode *alloc_anon_inode(struct super_block *);
2597 extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
2598 extern const struct dentry_operations simple_dentry_operations;
2599
2600 extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
2601 extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
2602 extern const struct file_operations simple_dir_operations;
2603 extern const struct inode_operations simple_dir_inode_operations;
/* One entry in the file table handed to simple_fill_super() below. */
struct tree_descr { char *name; const struct file_operations *ops; int mode; };
2605 struct dentry *d_alloc_name(struct dentry *, const char *);
2606 extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
2607 extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
2608 extern void simple_release_fs(struct vfsmount **mount, int *count);
2609
2610 extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
2611 loff_t *ppos, const void *from, size_t available);
2612 extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
2613 const void __user *from, size_t count);
2614
2615 extern int __generic_file_fsync(struct file *, loff_t, loff_t, int);
2616 extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
2617
2618 extern int generic_check_addressable(unsigned, u64);
2619
2620 #ifdef CONFIG_MIGRATION
2621 extern int buffer_migrate_page(struct address_space *,
2622 struct page *, struct page *,
2623 enum migrate_mode);
2624 #else
2625 #define buffer_migrate_page NULL
2626 #endif
2627
2628 extern int inode_change_ok(const struct inode *, struct iattr *);
2629 extern int inode_newsize_ok(const struct inode *, loff_t offset);
2630 extern void setattr_copy(struct inode *inode, const struct iattr *attr);
2631
2632 extern int file_update_time(struct file *file);
2633
2634 extern int generic_show_options(struct seq_file *m, struct dentry *root);
2635 extern void save_mount_options(struct super_block *sb, char *options);
2636 extern void replace_mount_options(struct super_block *sb, char *options);
2637
2638 static inline ino_t parent_ino(struct dentry *dentry)
2639 {
2640 ino_t res;
2641
2642 /*
2643 * Don't strictly need d_lock here? If the parent ino could change
2644 * then surely we'd have a deeper race in the caller?
2645 */
2646 spin_lock(&dentry->d_lock);
2647 res = dentry->d_parent->d_inode->i_ino;
2648 spin_unlock(&dentry->d_lock);
2649 return res;
2650 }
2651
2652 /* Transaction based IO helpers */
2653
2654 /*
2655 * An argresp is stored in an allocated page and holds the
2656 * size of the argument or response, along with its content
2657 */
struct simple_transaction_argresp {
	ssize_t size;	/* bytes used in data[] */
	char data[0];	/* payload; fills the rest of the page (see LIMIT below) */
};
2662
2663 #define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
2664
2665 char *simple_transaction_get(struct file *file, const char __user *buf,
2666 size_t size);
2667 ssize_t simple_transaction_read(struct file *file, char __user *buf,
2668 size_t size, loff_t *pos);
2669 int simple_transaction_release(struct inode *inode, struct file *file);
2670
2671 void simple_transaction_set(struct file *file, size_t n);
2672
2673 /*
2674 * simple attribute files
2675 *
2676 * These attributes behave similar to those in sysfs:
2677 *
2678 * Writing to an attribute immediately sets a value, an open file can be
2679 * written to multiple times.
2680 *
2681 * Reading from an attribute creates a buffer from the value that might get
2682 * read with multiple read calls. When the attribute has been read
2683 * completely, no further read calls are possible until the file is opened
2684 * again.
2685 *
2686 * All attributes contain a text representation of a numeric value
2687 * that are accessed with the get() and set() functions.
2688 */
/*
 * Expands to a static <__fops>_open() helper plus a complete
 * file_operations named __fops for a single-value attribute file
 * backed by the __get/__set callbacks.  __fmt is the printf-style
 * format used to render the value; it is type-checked at compile time
 * against a u64 via __simple_attr_check_format().
 */
#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
	__simple_attr_check_format(__fmt, 0ull); \
	return simple_attr_open(inode, file, __get, __set, __fmt); \
} \
static const struct file_operations __fops = { \
	.owner = THIS_MODULE, \
	.open = __fops ## _open, \
	.release = simple_attr_release, \
	.read = simple_attr_read, \
	.write = simple_attr_write, \
	.llseek = generic_file_llseek, \
}
2703
/*
 * Never called for its effect: exists only so the printf attribute
 * makes the compiler type-check __fmt against the u64 argument passed
 * by DEFINE_SIMPLE_ATTRIBUTE().
 */
static inline __printf(1, 2)
void __simple_attr_check_format(const char *fmt, ...)
{
	/* don't do anything, just let the compiler check the arguments; */
}
2709
2710 int simple_attr_open(struct inode *inode, struct file *file,
2711 int (*get)(void *, u64 *), int (*set)(void *, u64),
2712 const char *fmt);
2713 int simple_attr_release(struct inode *inode, struct file *file);
2714 ssize_t simple_attr_read(struct file *file, char __user *buf,
2715 size_t len, loff_t *ppos);
2716 ssize_t simple_attr_write(struct file *file, const char __user *buf,
2717 size_t len, loff_t *ppos);
2718
2719 struct ctl_table;
2720 int proc_nr_files(struct ctl_table *table, int write,
2721 void __user *buffer, size_t *lenp, loff_t *ppos);
2722 int proc_nr_dentry(struct ctl_table *table, int write,
2723 void __user *buffer, size_t *lenp, loff_t *ppos);
2724 int proc_nr_inodes(struct ctl_table *table, int write,
2725 void __user *buffer, size_t *lenp, loff_t *ppos);
2726 int __init get_filesystem_list(char *buf);
2727
2728 #define __FMODE_EXEC ((__force int) FMODE_EXEC)
2729 #define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
2730
2731 #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
2732 #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
2733 (flag & __FMODE_NONOTIFY)))
2734
2735 static inline int is_sxid(umode_t mode)
2736 {
2737 return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
2738 }
2739
/*
 * Set S_NOSEC on inodes that are not set[ug]id when the superblock
 * opted in via MS_NOSEC.  NOTE(review): S_NOSEC presumably lets the
 * write path skip repeated security checks — confirm at its readers.
 */
static inline void inode_has_no_xattr(struct inode *inode)
{
	if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
		inode->i_flags |= S_NOSEC;
}
2745
/*
 * dir_emit*(): feed one directory entry to the dir_context actor.
 * Each returns true while the actor accepts entries (actor returned 0)
 * and false once it wants the iteration stopped.
 */
static inline bool dir_emit(struct dir_context *ctx,
			    const char *name, int namelen,
			    u64 ino, unsigned type)
{
	return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
}
/* Emit "." using the directory's own inode number. */
static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
{
	return ctx->actor(ctx, ".", 1, ctx->pos,
			  file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0;
}
/* Emit ".." using the parent's inode number (via parent_ino()). */
static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
{
	return ctx->actor(ctx, "..", 2, ctx->pos,
			  parent_ino(file->f_path.dentry), DT_DIR) == 0;
}
2762 static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
2763 {
2764 if (ctx->pos == 0) {
2765 if (!dir_emit_dot(file, ctx))
2766 return false;
2767 ctx->pos = 1;
2768 }
2769 if (ctx->pos == 1) {
2770 if (!dir_emit_dotdot(file, ctx))
2771 return false;
2772 ctx->pos = 2;
2773 }
2774 return true;
2775 }
/*
 * Briefly drop and re-take i_mutex so contending tasks get a chance
 * during a long directory walk; returns false if the directory was
 * deleted (IS_DEADDIR) while the lock was released.
 */
static inline bool dir_relax(struct inode *inode)
{
	mutex_unlock(&inode->i_mutex);
	mutex_lock(&inode->i_mutex);
	return !IS_DEADDIR(inode);
}
2782
2783 #endif /* _LINUX_FS_H */ 1 /*
2 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
3 *
4 * (C) SGI 2006, Christoph Lameter
5 * Cleaned up and restructured to ease the addition of alternative
6 * implementations of SLAB allocators.
7 * (C) Linux Foundation 2008-2013
8 * Unified interface for all slab allocators
9 */
10
11 #ifndef _LINUX_SLAB_H
12 #define _LINUX_SLAB_H
13
14 #include <linux/gfp.h>
15 #include <linux/types.h>
16 #include <linux/workqueue.h>
17
18
19 /*
20 * Flags to pass to kmem_cache_create().
21 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
22 */
23 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
24 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
25 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
26 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
27 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
28 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
29 #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
30 /*
31 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
32 *
33 * This delays freeing the SLAB page by a grace period, it does _NOT_
34 * delay object freeing. This means that if you do kmem_cache_free()
35 * that memory location is free to be reused at any time. Thus it may
36 * be possible to see another object there in the same RCU grace period.
37 *
38 * This feature only ensures the memory location backing the object
39 * stays valid, the trick to using this is relying on an independent
40 * object validation pass. Something like:
41 *
42 * rcu_read_lock()
43 * again:
44 * obj = lockless_lookup(key);
45 * if (obj) {
46 * if (!try_get_ref(obj)) // might fail for free objects
47 * goto again;
48 *
49 * if (obj->key != key) { // not the object we expected
50 * put_ref(obj);
51 * goto again;
52 * }
53 * }
54 * rcu_read_unlock();
55 *
56 * This is useful if we need to approach a kernel structure obliquely,
57 * from its address obtained without the usual locking. We can lock
58 * the structure to stabilize it and check it's still at the given address,
59 * only if we can be sure that the memory has not been meanwhile reused
60 * for some other kind of object (which our subsystem's lock might corrupt).
61 *
62 * rcu_read_lock before reading the address, then rcu_read_unlock after
63 * taking the spinlock within the structure expected at that address.
64 */
65 #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
66 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
67 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
68
69 /* Flag to prevent checks on free */
70 #ifdef CONFIG_DEBUG_OBJECTS
71 # define SLAB_DEBUG_OBJECTS 0x00400000UL
72 #else
73 # define SLAB_DEBUG_OBJECTS 0x00000000UL
74 #endif
75
76 #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
77
78 /* Don't track use of uninitialized memory */
79 #ifdef CONFIG_KMEMCHECK
80 # define SLAB_NOTRACK 0x01000000UL
81 #else
82 # define SLAB_NOTRACK 0x00000000UL
83 #endif
84 #ifdef CONFIG_FAILSLAB
85 # define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */
86 #else
87 # define SLAB_FAILSLAB 0x00000000UL
88 #endif
89
90 /* The following flags affect the page allocator grouping pages by mobility */
91 #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
92 #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
93 /*
94 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
95 *
96 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
97 *
98 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
99 * Both make kfree a no-op.
100 */
101 #define ZERO_SIZE_PTR ((void *)16)
102
103 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
104 (unsigned long)ZERO_SIZE_PTR)
105
106 #include <linux/kmemleak.h>
107
108 struct mem_cgroup;
109 /*
110 * struct kmem_cache related prototypes
111 */
112 void __init kmem_cache_init(void);
113 int slab_is_available(void);
114
115 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
116 unsigned long,
117 void (*)(void *));
118 #ifdef CONFIG_MEMCG_KMEM
119 struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *,
120 struct kmem_cache *,
121 const char *);
122 #endif
123 void kmem_cache_destroy(struct kmem_cache *);
124 int kmem_cache_shrink(struct kmem_cache *);
125 void kmem_cache_free(struct kmem_cache *, void *);
126
127 /*
128 * Please use this macro to create slab caches. Simply specify the
129 * name of the structure and maybe some flags that are listed above.
130 *
131 * The alignment of the struct determines object alignment. If you
132 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
133 * then the objects will be properly aligned in SMP configurations.
134 */
135 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
136 sizeof(struct __struct), __alignof__(struct __struct),\
137 (__flags), NULL)
138
139 /*
140 * Common kmalloc functions provided by all allocators
141 */
142 void * __must_check __krealloc(const void *, size_t, gfp_t);
143 void * __must_check krealloc(const void *, size_t, gfp_t);
144 void kfree(const void *);
145 void kzfree(const void *);
146 size_t ksize(const void *);
147
148 /*
149 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
150 * alignment larger than the alignment of a 64-bit integer.
151 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
152 */
153 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
154 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
155 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
156 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
157 #else
158 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
159 #endif
160
161 /*
162 * Kmalloc array related definitions
163 */
164
165 #ifdef CONFIG_SLAB
166 /*
167 * The largest kmalloc size supported by the SLAB allocators is
168 * 32 megabyte (2^25) or the maximum allocatable page order if that is
169 * less than 32 MB.
170 *
171 * WARNING: Its not easy to increase this value since the allocators have
172 * to do various tricks to work around compiler limitations in order to
173 * ensure proper constant folding.
174 */
175 #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
176 (MAX_ORDER + PAGE_SHIFT - 1) : 25)
177 #define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
178 #ifndef KMALLOC_SHIFT_LOW
179 #define KMALLOC_SHIFT_LOW 5
180 #endif
181 #endif
182
183 #ifdef CONFIG_SLUB
184 /*
185 * SLUB directly allocates requests fitting in to an order-1 page
186 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
187 */
188 #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
189 #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
190 #ifndef KMALLOC_SHIFT_LOW
191 #define KMALLOC_SHIFT_LOW 3
192 #endif
193 #endif
194
195 #ifdef CONFIG_SLOB
196 /*
197 * SLOB passes all requests larger than one page to the page allocator.
198 * No kmalloc array is necessary since objects of different sizes can
199 * be allocated from the same page.
200 */
201 #define KMALLOC_SHIFT_HIGH PAGE_SHIFT
202 #define KMALLOC_SHIFT_MAX 30
203 #ifndef KMALLOC_SHIFT_LOW
204 #define KMALLOC_SHIFT_LOW 3
205 #endif
206 #endif
207
208 /* Maximum allocatable size */
209 #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
210 /* Maximum size for which we actually use a slab cache */
211 #define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
212 /* Maximum order allocatable via the slab allocator */
213 #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
214
215 /*
216 * Kmalloc subsystem.
217 */
218 #ifndef KMALLOC_MIN_SIZE
219 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
220 #endif
221
222 /*
223 * This restriction comes from byte sized index implementation.
224 * Page size is normally 2^12 bytes and, in this case, if we want to use
225 * byte sized index which can represent 2^8 entries, the size of the object
226 * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
227 * If minimum size of kmalloc is less than 16, we use it as minimum object
228 * size and give up to use byte sized index.
229 */
230 #define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
231 (KMALLOC_MIN_SIZE) : 16)
232
233 #ifndef CONFIG_SLOB
234 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
235 #ifdef CONFIG_ZONE_DMA
236 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
237 #endif
238
239 /*
240 * Figure out which kmalloc slab an allocation of a certain size
241 * belongs to.
242 * 0 = zero alloc
243 * 1 = 65 .. 96 bytes
244 * 2 = 120 .. 192 bytes
245 * n = 2^(n-1) .. 2^n -1
246 */
/*
 * kmalloc_index - map an allocation size to the index of the kmalloc
 * cache that services it (see kmalloc_caches[]).
 *
 * Written as a flat ladder of constant comparisons so that, for a
 * compile-time-constant @size, the whole function folds to a single
 * integer at compile time (__always_inline is essential for this).
 */
247 static __always_inline int kmalloc_index(size_t size)
248 {
/* Index 0 is reserved for the zero-size case (ZERO_SIZE_PTR). */
249 if (!size)
250 return 0;
251
/* Everything at or below the minimum lands in the smallest cache. */
252 if (size <= KMALLOC_MIN_SIZE)
253 return KMALLOC_SHIFT_LOW;
254
/* Non-power-of-two caches 96 and 192, when the arch minimum allows. */
255 if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
256 return 1;
257 if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
258 return 2;
/* Power-of-two caches: index n covers sizes (2^(n-1), 2^n]. */
259 if (size <= 8) return 3;
260 if (size <= 16) return 4;
261 if (size <= 32) return 5;
262 if (size <= 64) return 6;
263 if (size <= 128) return 7;
264 if (size <= 256) return 8;
265 if (size <= 512) return 9;
266 if (size <= 1024) return 10;
267 if (size <= 2 * 1024) return 11;
268 if (size <= 4 * 1024) return 12;
269 if (size <= 8 * 1024) return 13;
270 if (size <= 16 * 1024) return 14;
271 if (size <= 32 * 1024) return 15;
272 if (size <= 64 * 1024) return 16;
273 if (size <= 128 * 1024) return 17;
274 if (size <= 256 * 1024) return 18;
275 if (size <= 512 * 1024) return 19;
276 if (size <= 1024 * 1024) return 20;
277 if (size <= 2 * 1024 * 1024) return 21;
278 if (size <= 4 * 1024 * 1024) return 22;
279 if (size <= 8 * 1024 * 1024) return 23;
280 if (size <= 16 * 1024 * 1024) return 24;
281 if (size <= 32 * 1024 * 1024) return 25;
282 if (size <= 64 * 1024 * 1024) return 26;
/* Larger sizes must never reach this function. */
283 BUG();
284
285 /* Will never be reached. Needed because the compiler may complain */
286 return -1;
287 }
288 #endif /* !CONFIG_SLOB */
289
290 void *__kmalloc(size_t size, gfp_t flags);
291 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
292
293 #ifdef CONFIG_NUMA
294 void *__kmalloc_node(size_t size, gfp_t flags, int node);
295 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
296 #else
/* !CONFIG_NUMA stub: there is only one node, so @node is ignored. */
297 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
298 {
299 return __kmalloc(size, flags);
300 }
301
/* !CONFIG_NUMA stub: there is only one node, so @node is ignored. */
302 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
303 {
304 return kmem_cache_alloc(s, flags);
305 }
306 #endif
307
308 #ifdef CONFIG_TRACING
309 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
310
311 #ifdef CONFIG_NUMA
312 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
313 gfp_t gfpflags,
314 int node, size_t size);
315 #else
/*
 * !CONFIG_NUMA (but CONFIG_TRACING): fold the node-aware traced
 * allocation into the plain traced one; @node is ignored.
 */
316 static __always_inline void *
317 kmem_cache_alloc_node_trace(struct kmem_cache *s,
318 gfp_t gfpflags,
319 int node, size_t size)
320 {
321 return kmem_cache_alloc_trace(s, gfpflags, size);
322 }
323 #endif /* CONFIG_NUMA */
324
325 #else /* CONFIG_TRACING */
/*
 * !CONFIG_TRACING stub: @size is only consumed by the tracepoint, so
 * it is dropped and the allocation goes straight to kmem_cache_alloc().
 */
326 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
327 gfp_t flags, size_t size)
328 {
329 return kmem_cache_alloc(s, flags);
330 }
331
/*
 * !CONFIG_TRACING stub: @size is only consumed by the tracepoint, so
 * it is dropped and the node-aware allocation is called directly.
 */
332 static __always_inline void *
333 kmem_cache_alloc_node_trace(struct kmem_cache *s,
334 gfp_t gfpflags,
335 int node, size_t size)
336 {
337 return kmem_cache_alloc_node(s, gfpflags, node);
338 }
339 #endif /* CONFIG_TRACING */
340
341 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
342
343 #ifdef CONFIG_TRACING
344 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
345 #else
/* !CONFIG_TRACING stub: no tracepoint, call kmalloc_order() directly. */
346 static __always_inline void *
347 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
348 {
349 return kmalloc_order(size, flags, order);
350 }
351 #endif
352
353 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
354 {
355 unsigned int order = get_order(size);
356 return kmalloc_order_trace(size, flags, order);
357 }
358
359 /**
360 * kmalloc - allocate memory
361 * @size: how many bytes of memory are required.
362 * @flags: the type of memory to allocate.
363 *
364 * kmalloc is the normal method of allocating memory
365 * for objects smaller than page size in the kernel.
366 *
367 * The @flags argument may be one of:
368 *
369 * %GFP_USER - Allocate memory on behalf of user. May sleep.
370 *
371 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
372 *
373 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
374 * For example, use this inside interrupt handlers.
375 *
376 * %GFP_HIGHUSER - Allocate pages from high memory.
377 *
378 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
379 *
380 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
381 *
382 * %GFP_NOWAIT - Allocation will not sleep.
383 *
384 * %__GFP_THISNODE - Allocate node-local memory only.
385 *
386 * %GFP_DMA - Allocation suitable for DMA.
387 * Should only be used for kmalloc() caches. Otherwise, use a
388 * slab created with SLAB_DMA.
389 *
390 * Also it is possible to set different flags by OR'ing
391 * in one or more of the following additional @flags:
392 *
393 * %__GFP_COLD - Request cache-cold pages instead of
394 * trying to return cache-warm pages.
395 *
396 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
397 *
398 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
399 * (think twice before using).
400 *
401 * %__GFP_NORETRY - If memory is not immediately available,
402 * then give up at once.
403 *
404 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
405 *
406 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
407 *
408 * There are other flags available as well, but these are not intended
409 * for general use, and so are not documented here. For a full list of
410 * potential flags, always refer to linux/gfp.h.
411 */
412 static __always_inline void *kmalloc(size_t size, gfp_t flags)
413 {
/*
 * Fast path: when @size is a compile-time constant the cache lookup
 * below folds away entirely (kmalloc_index() is __always_inline).
 */
414 if (__builtin_constant_p(size)) {
/* Too big for any kmalloc cache: go straight to the page allocator. */
415 if (size > KMALLOC_MAX_CACHE_SIZE)
416 return kmalloc_large(size, flags);
417 #ifndef CONFIG_SLOB
/* DMA requests use the separate kmalloc_dma_caches, handled out of line. */
418 if (!(flags & GFP_DMA)) {
419 int index = kmalloc_index(size);
420
/* Index 0 means a zero-sized request. */
421 if (!index)
422 return ZERO_SIZE_PTR;
423
424 return kmem_cache_alloc_trace(kmalloc_caches[index],
425 flags, size);
426 }
427 #endif
428 }
/* Slow path: non-constant size, SLOB, or GFP_DMA. */
429 return __kmalloc(size, flags);
430 }
431
432 /*
433 * Determine size used for the nth kmalloc cache.
434 * return size or 0 if a kmalloc cache for that
435 * size does not exist
436 */
437 static __always_inline int kmalloc_size(int n)
438 {
439 #ifndef CONFIG_SLOB
/* Regular power-of-two caches: index n serves objects up to 2^n bytes. */
440 if (n > 2)
441 return 1 << n;
442
/* Indices 1 and 2 are the odd-sized 96- and 192-byte caches, which
 * only exist when the arch minimum allocation size permits them. */
443 if (n == 1 && KMALLOC_MIN_SIZE <= 32)
444 return 96;
445
446 if (n == 2 && KMALLOC_MIN_SIZE <= 64)
447 return 192;
448 #endif
/* No such cache (SLOB, or an index with no cache configured). */
449 return 0;
450 }
451
/*
 * kmalloc_node - allocate memory from a particular memory node.
 * Mirrors kmalloc(): constant sizes that fit a kmalloc cache and are
 * not GFP_DMA resolve their cache index at compile time; everything
 * else falls through to the out-of-line __kmalloc_node().
 */
452 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
453 {
454 #ifndef CONFIG_SLOB
455 if (__builtin_constant_p(size) &&
456 size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
457 int i = kmalloc_index(size);
458
/* Index 0 means a zero-sized request. */
459 if (!i)
460 return ZERO_SIZE_PTR;
461
462 return kmem_cache_alloc_node_trace(kmalloc_caches[i],
463 flags, node, size);
464 }
465 #endif
466 return __kmalloc_node(size, flags, node);
467 }
468
469 /*
470 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
471 * Intended for arches that get misalignment faults even for 64 bit integer
472 * aligned buffers.
473 */
474 #ifndef ARCH_SLAB_MINALIGN
475 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
476 #endif
477 /*
478 * This is the main placeholder for memcg-related information in kmem caches.
479 * struct kmem_cache will hold a pointer to it, so the memory cost while
480 * disabled is 1 pointer. The runtime cost while enabled, gets bigger than it
481 * would otherwise be if that would be bundled in kmem_cache: we'll need an
482 * extra pointer chase. But the trade off clearly lays in favor of not
483 * penalizing non-users.
484 *
485 * Both the root cache and the child caches will have it. For the root cache,
486 * this will hold a dynamically allocated array large enough to hold
487 * information about the currently limited memcgs in the system. To allow the
488 * array to be accessed without taking any locks, on relocation we free the old
489 * version only after a grace period.
490 *
491 * Child caches will hold extra metadata needed for its operation. Fields are:
492 *
493 * @memcg: pointer to the memcg this cache belongs to
494 * @list: list_head for the list of all caches in this memcg
495 * @root_cache: pointer to the global, root cache, this cache was derived from
496 * @nr_pages: number of pages that belongs to this cache.
497 */
498 struct memcg_cache_params {
/* Discriminates which arm of the union below is valid. */
499 bool is_root_cache;
500 union {
/* Root cache: RCU-freed array of per-memcg child caches. */
501 struct {
502 struct rcu_head rcu_head;
503 struct kmem_cache *memcg_caches[0];
504 };
/* Child cache: back-pointers to its memcg and root cache. */
505 struct {
506 struct mem_cgroup *memcg;
507 struct list_head list;
508 struct kmem_cache *root_cache;
509 atomic_t nr_pages;
510 };
511 };
512 };
513
514 int memcg_update_all_caches(int num_memcgs);
515
516 struct seq_file;
517 int cache_show(struct kmem_cache *s, struct seq_file *m);
518 void print_slabinfo_header(struct seq_file *m);
519
520 /**
521 * kmalloc_array - allocate memory for an array.
522 * @n: number of elements.
523 * @size: element size.
524 * @flags: the type of memory to allocate (see kmalloc).
525 */
526 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
527 {
528 if (size != 0 && n > SIZE_MAX / size)
529 return NULL;
530 return __kmalloc(n * size, flags);
531 }
532
533 /**
534 * kcalloc - allocate memory for an array. The memory is set to zero.
535 * @n: number of elements.
536 * @size: element size.
537 * @flags: the type of memory to allocate (see kmalloc).
538 */
539 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
540 {
/* Overflow checking is delegated to kmalloc_array(); __GFP_ZERO
 * makes the allocator return zeroed memory. */
541 return kmalloc_array(n, size, flags | __GFP_ZERO);
542 }
543
544 /*
545 * kmalloc_track_caller is a special version of kmalloc that records the
546 * calling function of the routine calling it for slab leak tracking instead
547 * of just the calling function (confusing, eh?).
548 * It's useful when the call to kmalloc comes from a widely-used standard
549 * allocator where we care about the real place the memory allocation
550 * request comes from.
551 */
552 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
553 #define kmalloc_track_caller(size, flags) \
554 __kmalloc_track_caller(size, flags, _RET_IP_)
555
556 #ifdef CONFIG_NUMA
557 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
558 #define kmalloc_node_track_caller(size, flags, node) \
559 __kmalloc_node_track_caller(size, flags, node, \
560 _RET_IP_)
561
562 #else /* CONFIG_NUMA */
563
564 #define kmalloc_node_track_caller(size, flags, node) \
565 kmalloc_track_caller(size, flags)
566
567 #endif /* CONFIG_NUMA */
568
569 /*
570 * Shortcuts
571 */
/* Allocate one object from cache @k, zero-initialized via __GFP_ZERO. */
572 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
573 {
574 return kmem_cache_alloc(k, flags | __GFP_ZERO);
575 }
576
577 /**
578 * kzalloc - allocate memory. The memory is set to zero.
579 * @size: how many bytes of memory are required.
580 * @flags: the type of memory to allocate (see kmalloc).
581 */
582 static inline void *kzalloc(size_t size, gfp_t flags)
583 {
/* __GFP_ZERO makes the allocator return zeroed memory. */
584 return kmalloc(size, flags | __GFP_ZERO);
585 }
586
587 /**
588 * kzalloc_node - allocate zeroed memory from a particular memory node.
589 * @size: how many bytes of memory are required.
590 * @flags: the type of memory to allocate (see kmalloc).
591 * @node: memory node from which to allocate
592 */
593 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
594 {
/* Node-aware counterpart of kzalloc(); __GFP_ZERO zeroes the memory. */
595 return kmalloc_node(size, flags | __GFP_ZERO, node);
596 }
597
598 unsigned int kmem_cache_size(struct kmem_cache *s);
599 void __init kmem_cache_init_late(void);
600
601 #endif /* _LINUX_SLAB_H */ |
Here is an explanation of a rule violation that arose while checking your driver against the corresponding kernel.
Note that it may be a false positive, i.e., there may be no real error at all. Please analyze the given error trace and the related source code to determine whether there is an error in your driver.
The Error trace column shows the path on which the given rule is violated. You can expand or collapse entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu. You can also expand or collapse each particular entity by clicking +/-. Hovering over some entities shows tips. The error trace is also linked to the related source code: line numbers may be shown as links on the left, and you can click them to open the corresponding lines in the source code.
The Source code column shows the contents of the files related to the error trace. These include the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs indicate the currently opened file and the other available files; hovering over them shows the full file names, and clicking one shows that file's contents.
Kernel | Module | Rule | Verifier | Verdict | Status | Timestamp | Bug report |
linux-3.18-rc1.tar.xz | drivers/staging/dgnc/dgnc.ko | 106_1a | BLAST | Bug | Reported | 2014-12-20 00:57:29 | LKML20141219368 |
Comment
reported: 20 Dec 2014
[Home]