A while ago, while porting a serial driver, I noticed how deeply the object-oriented style is rooted in the Linux driver framework. Take that serial driver: the code is full of things that look unrelated to the serial port itself, such as tty_register_driver, yet we cannot delete them, because it is precisely this code that ties the tty core to the concrete tty driver (the serial driver, for instance). When I first read ldd3 I came across the terms tty core and tty driver without really knowing what each referred to; only after reading the code did it become clear. The tty core is the top-level framework for all tty-type drivers: it is implemented by the kernel, we do not need to modify it, and its code lives mainly in n_tty.c, tty_io.c and other files under drivers/char. A tty driver is a concrete device driver, such as a serial driver or a console driver. The summary below is only an overall analysis of the tty architecture; I hope it gives you some useful pointers.
The tty architecture actually has three layers:

Layer 1: the tty core

The top-level framework for all tty-type drivers. It presents a unified interface to user space, so an application's read/write calls arrive here first. This layer is implemented by the kernel; its code lives mainly in n_tty.c, tty_io.c and other files under drivers/char:
static const struct file_operations tty_fops = {
	.llseek		= no_llseek,
	.read		= tty_read,
	.write		= tty_write,
	.poll		= tty_poll,
	.unlocked_ioctl	= tty_ioctl,
	.compat_ioctl	= tty_compat_ioctl,
	.open		= tty_open,
	.release	= tty_release,
	.fasync		= tty_fasync,
};
Every tty-type driver calls tty_register_driver() when it registers:
int tty_register_driver(struct tty_driver *driver)
{
	...
	cdev_init(&driver->cdev, &tty_fops);
	...
}
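Because tty_register_driver() initializes the driver's character device with tty_fops, an ordinary open()/write() on a tty device node from user space enters the tty core first. Here is a minimal user-space sketch of the path this article traces; the device node /dev/ttyS0 is just an example:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Open a tty device node; /dev/ttyS0 is only an example. */
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* This write() lands in tty_write() in the tty core, which calls the
	 * line discipline's write (n_tty_write), which in turn calls the
	 * concrete driver's write (uart_write for a serial port). */
	if (write(fd, "hello\n", 6) < 0)
		perror("write");

	close(fd);
	return 0;
}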
static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	...
	ld = tty_ldisc_ref_wait(tty);
	if (ld->ops->read)
		/* call down into the ldisc (line discipline) layer's read */
		i = (ld->ops->read)(tty, file, buf, count);
	else
		i = -EIO;
	tty_ldisc_deref(ld);
	...
}
static ssize_t tty_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	...
	ld = tty_ldisc_ref_wait(tty);
	if (!ld->ops->write)
		ret = -EIO;
	else
		ret = do_tty_write(ld->ops->write, tty, file, buf, count);
	tty_ldisc_deref(ld);
	return ret;
}
static inline ssize_t do_tty_write(
	ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
	struct tty_struct *tty,
	struct file *file,
	const char __user *buf,
	size_t count)
{
	...
	for (;;) {
		size_t size = count;
		if (size > chunk)
			size = chunk;
		ret = -EFAULT;
		if (copy_from_user(tty->write_buf, buf, size))
			break;
		/* call down into the ldisc layer's write */
		ret = write(tty, file, tty->write_buf, size);
		if (ret <= 0)
			break;
		...
	}
	...
}
Layer 2: the line discipline

Different types of tty devices use different line disciplines. This layer is also implemented by the kernel; the main code is in drivers/char/tty_ldisc.c, with the default N_TTY discipline in drivers/char/n_tty.c. As tty_read/tty_write show, they end up calling the line discipline's read/write functions:
struct tty_ldisc_ops tty_ldisc_N_TTY = {
	.magic		= TTY_LDISC_MAGIC,
	.name		= "n_tty",
	.open		= n_tty_open,
	.close		= n_tty_close,
	.flush_buffer	= n_tty_flush_buffer,
	.chars_in_buffer = n_tty_chars_in_buffer,
	.read		= n_tty_read,
	.write		= n_tty_write,
	.ioctl		= n_tty_ioctl,
	.set_termios	= n_tty_set_termios,
	.poll		= n_tty_poll,
	.receive_buf	= n_tty_receive_buf,
	.write_wakeup	= n_tty_write_wakeup
};
static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
			   const unsigned char *buf, size_t nr)
{
	const unsigned char *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	ssize_t retval = 0;
	...
	add_wait_queue(&tty->write_wait, &wait);	/* put the current process on the wait queue */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		/*
		 * We may be running again here because a signal woke us up,
		 * not because the condition was satisfied.  We only carry on
		 * once the condition really is satisfied; otherwise we return.
		 */
		if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) {
			retval = -EIO;
			break;
		}
		if (O_OPOST(tty) && !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
			while (nr > 0) {
				ssize_t num = process_output_block(tty, b, nr);
				if (num < 0) {
					if (num == -EAGAIN)
						break;
					retval = num;
					goto break_out;
				}
				b += num;
				nr -= num;
				if (nr == 0)
					break;
				c = *b;
				if (process_output(c, tty) < 0)
					break;
				b++; nr--;
			}
			if (tty->ops->flush_chars)
				tty->ops->flush_chars(tty);
		} else {
			while (nr > 0) {
				/* call down into the concrete driver's write */
				c = tty->ops->write(tty, b, nr);
				if (c < 0) {
					retval = c;
					goto break_out;
				}
				if (!c)
					break;
				b += c;
				nr -= c;
			}
		}
		if (!nr)
			break;		/* everything written, return */
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}
		/*
		 * If the file was opened non-blocking we also return right away.
		 * Otherwise we give up the CPU and resume only once the
		 * condition is satisfied.
		 */
		schedule();	/* only here does the current process really give up the CPU */
	}
break_out:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tty->write_wait, &wait);
	...
}
A detailed analysis of this code can be found at http://blog.chinaunix.net/u2/73067/showart.php?id=2241493
This code uses a wait queue. Why? When we open a device file from user space there are two modes, blocking and non-blocking. Non-blocking is simple: the call returns immediately, whatever the outcome. Blocking is more stubborn: it keeps waiting until the operation completes. So how is the "blocking" version of write implemented inside the kernel? With a wait queue: as long as the condition is not satisfied (the driver-level write has not yet consumed all the data), the process keeps yielding the CPU, and only when the condition is met does it resume and hand the result of the write back to the upper layer.

From this analysis we can also draw a conclusion: blocking is implemented in the ldisc layer, i.e. the line discipline. For reasons of cost and practicality, we do not implement blocking read/write in the driver itself.
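Stripped of the tty-specific details, the blocking pattern used by n_tty_write looks roughly like the sketch below. my_try_write() and my_all_written() are hypothetical placeholders, not kernel symbols; only the wait-queue and scheduling calls themselves are real kernel APIs:

#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/wait.h>

/* Hypothetical helpers, declared only so the sketch is self-contained. */
static ssize_t my_try_write(struct tty_struct *tty);
static int my_all_written(struct tty_struct *tty);

/* Hedged sketch of a wait-queue based blocking write. */
static ssize_t blocking_write_sketch(struct tty_struct *tty, struct file *file)
{
	DECLARE_WAITQUEUE(wait, current);
	ssize_t retval = 0;

	add_wait_queue(&tty->write_wait, &wait);	/* join the wait queue */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {		/* woken by a signal */
			retval = -ERESTARTSYS;
			break;
		}
		retval = my_try_write(tty);		/* hypothetical: push what we can */
		if (my_all_written(tty))		/* hypothetical: condition met, done */
			break;
		if (file->f_flags & O_NONBLOCK) {	/* non-blocking open: give up now */
			retval = -EAGAIN;
			break;
		}
		schedule();	/* really yield the CPU until the driver wakes write_wait */
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tty->write_wait, &wait);
	return retval;
}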
The code above contains this line:

c = tty->ops->write(tty, b, nr);

It calls the write member of tty_struct's ops. But how is tty_struct's ops->write related to the write function defined in the concrete driver? Through the following call chain:
tty_open -> tty_init_dev -> initialize_tty_struct
In drivers/char/tty_io.c:
void initialize_tty_struct(struct tty_struct *tty,
			   struct tty_driver *driver, int idx)
{
	...
	tty->ops = driver->ops;
	...
}
So when a tty device is opened, the driver's ops pointer is assigned to the ops member of the device's tty_struct. As a result, tty->ops->write() really ends up in the write function of the concrete driver; for a serial port, that is the serial driver's write function.
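As a hedged illustration (my_port_write and hw_put_char below are made-up names, not from any real driver), a driver-side write that tty->ops->write dispatches to has this shape in this kernel version; it is hooked in through the .write member of tty_operations exactly as uart_write is in uart_ops below:

#include <linux/tty.h>

/* Hypothetical helper: push one byte into the hardware transmit FIFO. */
static void hw_put_char(unsigned char ch);

/* Hypothetical driver write, matching the tty_operations .write signature. */
static int my_port_write(struct tty_struct *tty,
			 const unsigned char *buf, int count)
{
	int i;

	for (i = 0; i < count; i++)
		hw_put_char(buf[i]);

	/* Return the number of bytes accepted; n_tty_write keeps looping
	 * (and blocking) until everything has been consumed. */
	return count;
}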
The read path, n_tty_read, is more involved and is not discussed here for now. Note, though, that it does not end in a driver read callback (tty_operations has no read): the driver pushes received bytes up through the line discipline's receive_buf, and n_tty_read copies them to user space.
Layer 3: the concrete tty driver, implemented by us

For example, the following excerpt from serial_core.c describes the serial-port case:
static const struct tty_operations uart_ops = {
	.open		= uart_open,
	.close		= uart_close,
	.write		= uart_write,
	.put_char	= uart_put_char,
	.flush_chars	= uart_flush_chars,
	.write_room	= uart_write_room,
	.chars_in_buffer = uart_chars_in_buffer,
	.flush_buffer	= uart_flush_buffer,
	.ioctl		= uart_ioctl,
	.throttle	= uart_throttle,
	.unthrottle	= uart_unthrottle,
	.send_xchar	= uart_send_xchar,
	.set_termios	= uart_set_termios,
	.set_ldisc	= uart_set_ldisc,
	.stop		= uart_stop,
	.start		= uart_start,
	.hangup		= uart_hangup,
	.break_ctl	= uart_break_ctl,
	.wait_until_sent = uart_wait_until_sent,
#ifdef CONFIG_PROC_FS
	.read_proc	= uart_read_proc,
#endif
	.tiocmget	= uart_tiocmget,
	.tiocmset	= uart_tiocmset,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= uart_poll_init,
	.poll_get_char	= uart_poll_get_char,
	.poll_put_char	= uart_poll_put_char,
#endif
};
int uart_register_driver(struct uart_driver *drv)
{
	struct tty_driver *normal = NULL;
	...
	normal = alloc_tty_driver(drv->nr);
	if (!normal)
		goto out;	/* the 'out' label is in the elided code */

	drv->tty_driver = normal;

	normal->owner		= drv->owner;
	normal->driver_name	= drv->driver_name;
	normal->name		= drv->dev_name;
	normal->major		= drv->major;
	normal->minor_start	= drv->minor;
	normal->type		= TTY_DRIVER_TYPE_SERIAL;
	normal->subtype		= SERIAL_TYPE_NORMAL;
	normal->init_termios	= tty_std_termios;
	normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	normal->init_termios.c_ispeed = normal->init_termios.c_ospeed = 9600;
	normal->flags		= TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	normal->driver_state	= drv;
	tty_set_operations(normal, &uart_ops);	/* hook uart_ops into the tty_driver */
	...
}
We mainly implement this third layer; the two layers above it already exist in the kernel and we simply plug into them. Once this layer is written in the tty driver format and the few required operations are implemented, the driver is ready to run.
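As a hedged sketch of what this layer looks like from the driver author's side (every my_* name below is hypothetical; alloc_tty_driver, tty_set_operations, tty_register_driver and put_tty_driver are real kernel APIs of this era), registration boils down to something like:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>

/* Hypothetical write stub; a real driver would push the bytes to hardware. */
static int my_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	return count;
}

static const struct tty_operations my_tty_ops = {
	.write	= my_write,
	/* .open, .close, .write_room, ... go here in a real driver */
};

static struct tty_driver *my_driver;

static int __init my_tty_init(void)
{
	int ret;

	my_driver = alloc_tty_driver(1);		/* one port */
	if (!my_driver)
		return -ENOMEM;

	my_driver->owner	= THIS_MODULE;
	my_driver->driver_name	= "my_tty";
	my_driver->name		= "mytty";		/* device nodes named mytty<n> */
	my_driver->type		= TTY_DRIVER_TYPE_SERIAL;
	my_driver->subtype	= SERIAL_TYPE_NORMAL;
	my_driver->init_termios	= tty_std_termios;
	my_driver->flags	= TTY_DRIVER_REAL_RAW;
	tty_set_operations(my_driver, &my_tty_ops);	/* same hook as uart_register_driver */

	ret = tty_register_driver(my_driver);		/* binds the cdev to tty_fops, see layer 1 */
	if (ret)
		put_tty_driver(my_driver);
	return ret;
}
module_init(my_tty_init);

static void __exit my_tty_exit(void)
{
	tty_unregister_driver(my_driver);
	put_tty_driver(my_driver);
}
module_exit(my_tty_exit);

MODULE_LICENSE("GPL");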