原文轉自:http://m.blog.csdn.net/blog/lushengchu2003/9368031
最近閑來無事情做,想到以前項目中遇到串口硬件流控制的問題,藍牙串口控制返回錯誤,上層讀寫串口buffer溢出的問題等,也折騰了一陣子,雖然 最終證明與串口驅動無關,但是排查問題時候毫無疑問會查看串口驅動的相關代碼,所以把串口驅動的流程過了一遍,方便以后再用到時拿來用。分析的是全志代碼 A20。直接開始代碼分析吧。
串口驅動代碼在linux-3.3/drivers/tty/serial目錄下,全志把自己平台相關的代碼集中到了一個文件中,叫sw_uart.c,那就從它的__init開始了:
/*
 * Module entry point for the Allwinner (sunxi) UART driver.
 *
 * Parses the sysconfig script for per-port settings, registers the
 * uart_driver with the serial core, creates one platform device per
 * enabled port, and finally registers the platform driver (whose probe
 * adds each port via uart_add_one_port()).
 *
 * Returns 0 on success or a negative errno.
 */
static int __init sw_uart_init(void)
{
	int ret;
	u32 i;
	struct sw_uart_pdata *pdata;

	SERIAL_MSG("driver initialized\n");

	/* Read the per-port configuration from the sysconfig script. */
	ret = sw_uart_get_devinfo();
	if (unlikely(ret))
		return ret;

	ret = uart_register_driver(&sw_uart_driver);
	if (unlikely(ret)) {
		/* Original printed the success banner here; report the real failure. */
		SERIAL_MSG("uart_register_driver failed\n");
		return ret;
	}

	/* Only ports marked "used" in the sysconfig script get a device. */
	for (i = 0; i < SW_UART_NR; i++) {
		pdata = &sw_uport_pdata[i];
		if (pdata->used)
			platform_device_register(&sw_uport_device[i]);
	}

	return platform_driver_register(&sw_uport_platform_driver);
}
sw_uart_get_devinfo是解析全志的sys配置腳本中的串口配置,一共有八個串口,要用到哪個直接在sys腳本中設置1就行了,這個用過全志平台的都知道
接着sw_uart_driver結構體定義如下:
/*
 * uart_driver descriptor handed to the serial core.  SW_UART_NR (8)
 * lines; device nodes are named /dev/ttyS0 .. /dev/ttyS7 ("ttyS" plus
 * line index).  SW_CONSOLE is the console attached to these ports.
 */
static struct uart_driver sw_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "sw_serial",
.dev_name = "ttyS",
.nr = SW_UART_NR,
.cons = SW_CONSOLE,
};
這里SW_UART_NR為8,ttyS就是將要顯示在/dev/目錄下的名字了,從0~7
接着看uart注冊函數,顧名思義是把全志自己平台的串口注冊到串口核心serial_core中去:
int uart_register_driver(struct uart_driver *drv)
{
struct tty_driver *normal;
int i, retval;
BUG_ON(drv->state);
/*
* Maybe we should be using a slab cache for this, especially if
* we have a large number of ports to handle.
*/
drv->state = kzalloc(sizeof(struct uart_state) * drv->nr, GFP_KERNEL);
if (!drv->state)
goto out;
normal = alloc_tty_driver(drv->nr);
if (!normal)
goto out_kfree;
drv->tty_driver = normal;
normal->owner = drv->owner;
normal->driver_name = drv->driver_name;
normal->name = drv->dev_name;//名字為ttyS
normal->major = drv->major;
normal->minor_start = drv->minor;
normal->type = TTY_DRIVER_TYPE_SERIAL;
normal->subtype = SERIAL_TYPE_NORMAL;
normal->init_termios = tty_std_termios;
normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
normal->init_termios.c_ispeed = normal->init_termios.c_ospeed = 9600;
normal->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
normal->driver_state = drv;
tty_set_operations(normal, &uart_ops);
/*
* Initialise the UART state(s).
*/
for (i = 0; i < drv->nr; i++) {
struct uart_state *state = drv->state + i;
struct tty_port *port = &state->port;
tty_port_init(port);
port->ops = &uart_port_ops;
port->close_delay = HZ / 2; /* .5 seconds */
port->closing_wait = 30 * HZ;/* 30 seconds */
}
retval = tty_register_driver(normal);
if (retval >= 0)
return retval;
put_tty_driver(normal);
out_kfree:
kfree(drv->state);
out:
return -ENOMEM;
}
先列出來吧,以用到時候再回來看,這里先創建了NR個state, 並為每個state做一些初始化,但是這些state還沒有和端口(uart_port)對應起來;初始化完port口后,調用tty_register_driver:
/*
 * Register a tty driver with the tty core: allocate the ttys/termios
 * pointer tables, obtain (or register) a char device region, hook up
 * tty_fops via cdev_add, and add the driver to the global tty_drivers
 * list used by tty_open() lookups.
 *
 * NOTE(review): this excerpt is truncated — the "err:" cleanup label
 * targeted by the goto below and the closing brace are not shown.
 */
int tty_register_driver(struct tty_driver *driver)
{
int error;
int i;
dev_t dev;
void **p = NULL;
struct device *d;
/* One combined table: first half tty_struct*, second half ktermios*. */
if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM) && driver->num) {
p = kzalloc(driver->num * 2 * sizeof(void *), GFP_KERNEL);
if (!p)
return -ENOMEM;
}
/* major == 0 means "allocate device numbers dynamically". */
if (!driver->major) {
error = alloc_chrdev_region(&dev, driver->minor_start,
driver->num, driver->name);
if (!error) {
driver->major = MAJOR(dev);
driver->minor_start = MINOR(dev);
}
} else {
dev = MKDEV(driver->major, driver->minor_start);
error = register_chrdev_region(dev, driver->num, driver->name);
}
if (error < 0) {
kfree(p);
return error;
}
if (p) {
driver->ttys = (struct tty_struct **)p;
driver->termios = (struct ktermios **)(p + driver->num);
} else {
driver->ttys = NULL;
driver->termios = NULL;
}
/* Bind tty_fops to the device numbers: /dev/ttyS* opens reach tty_open(). */
cdev_init(&driver->cdev, &tty_fops);
driver->cdev.owner = driver->owner;
error = cdev_add(&driver->cdev, dev, driver->num);
if (error) {
unregister_chrdev_region(dev, driver->num);
driver->ttys = NULL;
driver->termios = NULL;
kfree(p);
return error;
}
/* Make the driver findable by later tty_open() lookups. */
mutex_lock(&tty_mutex);
list_add(&driver->tty_drivers, &tty_drivers);
mutex_unlock(&tty_mutex);
/* Without DYNAMIC_DEV, create all device nodes right now. */
if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) {
for (i = 0; i < driver->num; i++) {
d = tty_register_device(driver, i, NULL);
if (IS_ERR(d)) {
error = PTR_ERR(d);
goto err;
}
}
}
proc_tty_register_driver(driver);
driver->flags |= TTY_DRIVER_INSTALLED;
return 0;
這里alloc_chrdev_region是動態分配了主從設備號,接着cdev_init把file_operations結構體和cdev關聯了起來,以后我們open /dev/ttyS節點時候會調用它的open函數,先看看這個結構體:
/*
 * VFS entry points for every tty char device.  Installed on the cdev in
 * tty_register_driver(); opening /dev/ttyS* lands in tty_open, and
 * read/write flow through the line discipline from tty_read/tty_write.
 */
static const struct file_operations tty_fops = {
.llseek = no_llseek,
.read = tty_read,
.write = tty_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
.compat_ioctl = tty_compat_ioctl,
.open = tty_open,
.release = tty_release,
.fasync = tty_fasync,
};
接着cdev_add就把file_operations和設備號關聯起來了,我們現在還沒有創建設備節點,不過看到有為 driver->major,driver->minor_start賦值的,后面創建節點就是用這兩個主從設備號。接着list_add把 這個tty_driver添加到鏈表中來,方便后續查找。
接着if語句判斷TTY_DRIVER_DYNAMIC_DEV標志,我們前面有賦值:
normal->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
所以這里的if條件不成立的,最后創建proc下的節點,就返回了。按我的理解,tty_register_driver是注冊了一個tty的驅 動,這個驅動有了邏輯能力,但是這個時候這個驅動還沒有對應任何設備,所以后續還要添加對應的端口(也就是芯片的物理串口),並創建/dev/下的設備節 點,上層用tty_driver驅動的邏輯來操作對應的端口。
回到sw_uart.c中,繼續__init函數,platform_device_register函數,如果sys配置文件中那個串口配置了1,才會注冊相應的平台設備;
接着platform_driver_register,看它的probe函數了:
/*
 * Platform-driver probe: bind platform device uart<id> to its
 * statically allocated sw_uart_port slot, acquire pins/clocks, derive
 * the UART input clock from the APB1 clock, then hand the port to the
 * serial core via uart_add_one_port().
 *
 * NOTE(review): the concrete errno from sw_uart_request_resource() and
 * clk_set_parent() is discarded (-ENXIO returned instead), and the
 * failure paths after sw_uart_request_resource() succeeds do not
 * release the acquired resources — verify against the matching
 * release helper.
 */
static int __devinit sw_uart_probe(struct platform_device *pdev)
{
u32 id = pdev->id;
struct uart_port *port;
struct sw_uart_port *sw_uport;
struct clk *apbclk;
int ret = -1;
if (unlikely(pdev->id < 0 || pdev->id >= SW_UART_NR))
return -ENXIO;
/* pdev->id selects the pre-allocated per-port slot. */
port = &sw_uart_port[id].port;
port->dev = &pdev->dev;
sw_uport = UART_TO_SPORT(port);
sw_uport->id = id;
/* Clear the shadow copies of the UART registers. */
sw_uport->ier = 0;
sw_uport->lcr = 0;
sw_uport->mcr = 0;
sw_uport->fcr = 0;
sw_uport->dll = 0;
sw_uport->dlh = 0;
/* request system resource and init them */
ret = sw_uart_request_resource(sw_uport);
if (unlikely(ret)) {
SERIAL_MSG("uart%d error to get resource\n", id);
return -ENXIO;
}
/* Source the module clock from APB1 and record its rate as uartclk. */
apbclk = clk_get(&pdev->dev, CLK_SYS_APB1);
if (IS_ERR(apbclk)) {
SERIAL_MSG("uart%d error to get source clock\n", id);
return -ENXIO;
}
ret = clk_set_parent(sw_uport->mclk, apbclk);
if (ret) {
SERIAL_MSG("uart%d set mclk parent error\n", id);
clk_put(apbclk);
return -ENXIO;
}
port->uartclk = clk_get_rate(apbclk);
clk_put(apbclk);
port->type = PORT_SW;
port->flags = UPF_BOOT_AUTOCONF;
/* Physical register base and IRQ come from the sysconfig pdata. */
port->mapbase = sw_uport->pdata->base;
port->irq = sw_uport->pdata->irq;
platform_set_drvdata(pdev, port);
#ifdef CONFIG_PROC_FS
sw_uart_procfs_attach(sw_uport);
#endif
SERIAL_DBG("add uart%d port, port_type %d, uartclk %d\n",
id, port->type, port->uartclk);
/* Bind the port to uart_driver state and create /dev/ttyS<id>. */
return uart_add_one_port(&sw_uart_driver, port);
}
sw_uart_request_resource是申請配置GPIO,接着uart_add_one_port:
/*
 * Attach a hardware port (uart_port) to the matching uart_state slot of
 * a registered uart_driver, configure it, and create its device node
 * via tty_register_device() (deferred until now by DYNAMIC_DEV).
 * Returns 0 on success, -EINVAL for a bad line or an already-bound
 * slot.  Must not be called from interrupt context.
 */
int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state;
struct tty_port *port;
int ret = 0;
struct device *tty_dev;
BUG_ON(in_interrupt());
if (uport->line >= drv->nr)
return -EINVAL;
state = drv->state + uport->line;/* state array was kzalloc'ed in uart_register_driver() */
port = &state->port;
/* Lock order: global port_mutex first, then the per-port mutex. */
mutex_lock(&port_mutex);
mutex_lock(&port->mutex);
if (state->uart_port) {
ret = -EINVAL;
goto out;
}
/* This is the binding: state <-> hardware port. */
state->uart_port = uport;
state->pm_state = -1;
uport->cons = drv->cons;
uport->state = state;
/*
 * If this port is a console, then the spinlock is already
 * initialised.
 */
if (!(uart_console(uport) && (uport->cons->flags & CON_ENABLED))) {
spin_lock_init(&uport->lock);
lockdep_set_class(&uport->lock, &port_lock_key);
}
uart_configure_port(drv, state, uport);
/*
 * Register the port whether it's detected or not. This allows
 * setserial to be used to alter this ports parameters.
 */
tty_dev = tty_register_device(drv->tty_driver, uport->line, uport->dev);
if (likely(!IS_ERR(tty_dev))) {
device_set_wakeup_capable(tty_dev, 1);
} else {
printk(KERN_ERR "Cannot register tty device on line %d\n",
uport->line);
}
/*
 * Ensure UPF_DEAD is not set.
 */
uport->flags &= ~UPF_DEAD;
out:
mutex_unlock(&port->mutex);
mutex_unlock(&port_mutex);
return ret;
}
這個函數看名字就猜到是為uart_driver添加端口,前面說過state的狀態還沒有和uart_port對應起來,那么這里 state->uart_port = uport就對應了,port的配置就不去關心它了,這樣,uart_driver就可以通過port對ops結構體的操作函數控制底層了,最后調用 tty_register_device:
/*
 * Create the sysfs/dev node for tty line @index of @driver (the node
 * udev turns into e.g. /dev/ttyS0).  The device number is the driver's
 * major/minor_start plus the line index; the node name comes from
 * pty_line_name() or tty_line_name() depending on driver type.
 * Returns the created struct device or an ERR_PTR on failure.
 */
struct device *tty_register_device(struct tty_driver *driver, unsigned index,
struct device *device)
{
	char name[64];
	dev_t dev;

	if (index >= driver->num) {
		printk(KERN_ERR "Attempt to register invalid tty line number "
		       " (%d).\n", index);
		return ERR_PTR(-EINVAL);
	}

	dev = MKDEV(driver->major, driver->minor_start) + index;

	if (driver->type == TTY_DRIVER_TYPE_PTY)
		pty_line_name(driver, index, name);
	else
		tty_line_name(driver, index, name);

	return device_create(tty_class, device, dev, NULL, name);
}
這里的tty_line_name函數定義如下:
/*
 * Build the device node name for line @index: driver->name ("ttyS",
 * assigned in uart_register_driver()) followed by the line number
 * offset by name_base — e.g. "ttyS0".."ttyS7".
 * NOTE(review): assumes @p points at a sufficiently large buffer
 * (callers here pass char[64]); no bound is enforced by sprintf.
 */
static void tty_line_name(struct tty_driver *driver, int index, char *p)
{
sprintf(p, "%s%d", driver->name, index + driver->name_base);/* name was assigned in uart_register_driver() */
}
可以看到就是前面說的名字ttyS0~ttyS7。
這樣,如果上層open節點,會調用到前面的file_operations結構體函數tty_open,這是tty核心層的調用:
/*
 * tty core open path for /dev/tty* nodes (excerpt — elided regions are
 * marked with dotted lines in the original).  Looks up the tty for the
 * opened device number, then dispatches to the tty driver's ops->open
 * (uart_open for serial ports, installed via tty_set_operations()).
 * On a hangup race it restores f_op and retries from retry_open.
 */
static int tty_open(struct inode *inode, struct file *filp)
{
struct tty_struct *tty;
int noctty, retval;
struct tty_driver *driver = NULL;
int index;
dev_t device = inode->i_rdev;
unsigned saved_flags = filp->f_flags;
nonseekable_open(inode, filp);
retry_open:
retval = tty_alloc_file(filp);
if (retval)
return -ENOMEM;
noctty = filp->f_flags & O_NOCTTY;
index = -1;
retval = 0;
mutex_lock(&tty_mutex);
tty_lock();
/* Find (or reuse) the tty_struct for this device number. */
tty = tty_open_current_tty(device, filp);
...................................
/* Hand off to the driver-specific open (uart_open for serial). */
if (tty->ops->open)
retval = tty->ops->open(tty, filp);
else
........................................
return retval;
schedule();
/*
 * Need to reset f_op in case a hangup happened.
 */
tty_lock();
if (filp->f_op == &hung_up_tty_fops)
filp->f_op = &tty_fops;
tty_unlock();
goto retry_open;
.................................
可以看到,首先查找tty_driver鏈表,找到前面添加的tty_driver,然后調用他的ops->open函數,這個ops賦值是在前面的uart_register_driver函數中:
tty_set_operations(normal, &uart_ops);
所以進入uart_ops結構體的open函數,這里就是從tty核心轉到serial核心,往下走了一層:
/*
 * serial core open (excerpt): recover the uart_driver from the tty
 * driver's driver_state (stored in uart_register_driver()), pick the
 * uart_state for this line, and start the port hardware via
 * uart_startup().  This is the hand-off from the tty core down into
 * the serial core.
 */
static int uart_open(struct tty_struct *tty, struct file *filp)
{
struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state;
int retval, line = tty->index;
struct uart_state *state = drv->state + line;
struct tty_port *port = &state->port;
................................
/*
 * Start up the serial port.
 */
retval = uart_startup(tty, state, 0);
.....................................
uart_startup:
/*
 * First-open initialisation of a serial port (serial core layer).
 *
 * Idempotent: returns 0 immediately when the port was already brought
 * up (ASYNC_INITIALIZED).  Otherwise TTY_IO_ERROR is set while the
 * hardware is started through uart_port_startup() and cleared again
 * only if that succeeds.  A positive low-level return is reported as
 * success (0).
 */
static int uart_startup(struct tty_struct *tty, struct uart_state *state,
int init_hw)
{
	struct tty_port *port = &state->port;
	int rc;

	if (port->flags & ASYNC_INITIALIZED)
		return 0;

	/*
	 * Mark the tty as faulted until the hardware is known good;
	 * only a successful startup clears the marker.
	 */
	set_bit(TTY_IO_ERROR, &tty->flags);

	rc = uart_port_startup(tty, state, init_hw);
	if (rc == 0) {
		set_bit(ASYNCB_INITIALIZED, &port->flags);
		clear_bit(TTY_IO_ERROR, &tty->flags);
		return 0;
	}

	/* Positive values mean "usable anyway" — report success. */
	return rc > 0 ? 0 : rc;
}
uart_port_startup:
/*
 * Hardware bring-up helper for uart_startup() (excerpt — the trailing
 * part of the function is not shown).  Calls the low-level driver's
 * ops->startup (sw_uart_startup for this platform), then programs line
 * settings, optionally raises RTS/DTR, and seeds hw_stopped from CTS
 * when CTS flow control is enabled.
 */
static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
int init_hw)
{
struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
unsigned long page;
int retval = 0;
/* Descend into the platform driver: sw_uart_startup() here. */
retval = uport->ops->startup(uport);
if (retval == 0) {
/* A console may carry over its cflag from boot; consume it once. */
if (uart_console(uport) && uport->cons->cflag) {
tty->termios->c_cflag = uport->cons->cflag;
uport->cons->cflag = 0;
}
/*
 * Initialise the hardware port settings.
 */
uart_change_speed(tty, state, NULL);
if (init_hw) {
/*
 * Setup the RTS and DTR signals once the
 * port is open and ready to respond.
 */
if (tty->termios->c_cflag & CBAUD)
uart_set_mctrl(uport, TIOCM_RTS | TIOCM_DTR);
}
/* With CTS flow control, start out stopped if CTS is deasserted. */
if (port->flags & ASYNC_CTS_FLOW) {
spin_lock_irq(&uport->lock);
if (!(uport->ops->get_mctrl(uport) & TIOCM_CTS))
tty->hw_stopped = 1;
spin_unlock_irq(&uport->lock);
}
}
可以看到,最終調用了 uport->ops->startup,這樣就從serial核心層往下走到了平台的串口驅動層,也就是最底層的驅動了,這個函數定義在sw_uart.c的sw_uart_port結構體中:
/*
 * Static table of the 8 platform UART ports.  Each entry pairs a
 * serial-core uart_port (memory-mapped I/O, 64-byte FIFO, line number,
 * sw_uart_ops with the ->startup/->start_tx hooks used below) with its
 * per-port platform data parsed from the sysconfig script.
 */
static struct sw_uart_port sw_uart_port[] = {
{ .port = { .iotype = UPIO_MEM, .ops = &sw_uart_ops, .fifosize = 64, .line = 0, },
.pdata = &sw_uport_pdata[0], },
{ .port = { .iotype = UPIO_MEM, .ops = &sw_uart_ops, .fifosize = 64, .line = 1, },
.pdata = &sw_uport_pdata[1], },
{ .port = { .iotype = UPIO_MEM, .ops = &sw_uart_ops, .fifosize = 64, .line = 2, },
.pdata = &sw_uport_pdata[2], },
{ .port = { .iotype = UPIO_MEM, .ops = &sw_uart_ops, .fifosize = 64, .line = 3, },
.pdata = &sw_uport_pdata[3], },
{ .port = { .iotype = UPIO_MEM, .ops = &sw_uart_ops, .fifosize = 64, .line = 4, },
.pdata = &sw_uport_pdata[4], },
{ .port = { .iotype = UPIO_MEM, .ops = &sw_uart_ops, .fifosize = 64, .line = 5, },
.pdata = &sw_uport_pdata[5], },
{ .port = { .iotype = UPIO_MEM, .ops = &sw_uart_ops, .fifosize = 64, .line = 6, },
.pdata = &sw_uport_pdata[6], },
{ .port = { .iotype = UPIO_MEM, .ops = &sw_uart_ops, .fifosize = 64, .line = 7, },
.pdata = &sw_uport_pdata[7], },
};
看他的.startup函數:
/*
 * serial_core ->startup() hook: bring the port up on first open.
 *
 * Registers the shared rx/tx/modem-status interrupt handler and
 * prepares the interrupt-enable shadow copy (receiver line status +
 * received data available).  Only the shadow is set up here — this
 * function performs no register write.
 *
 * Returns 0 on success or the request_irq() error.
 */
static int sw_uart_startup(struct uart_port *port)
{
	struct sw_uart_port *sw_uport = UART_TO_SPORT(port);
	int err;

	SERIAL_DBG("start up ...\n");

	snprintf(sw_uport->name, sizeof(sw_uport->name),
		 "sw_serial%d", port->line);
	err = request_irq(port->irq, sw_uart_irq, 0, sw_uport->name, port);
	if (unlikely(err)) {
		SERIAL_MSG("uart%d cannot get irq %d\n", sw_uport->id, port->irq);
		return err;
	}

	sw_uport->msr_saved_flags = 0;

	/*
	 * THRE trigger selection via PTIME (IER[7]):
	 *   PTIME=1 - THRE fires when the TX FIFO level drops below its
	 *             threshold; LSR.THRE is NOT set in that case, so the
	 *             IIR interrupt id must be checked to decide whether
	 *             more data needs to be sent.
	 *   PTIME=0 - THRE fires when the TX FIFO is empty.
	 */
	sw_uport->ier = SW_UART_IER_RLSI | SW_UART_IER_RDI;
#ifdef CONFIG_SW_UART_PTIME_MODE
	sw_uport->ier |= SW_UART_IER_PTIME;
#endif
	return 0;
}
這里最終就是初始化ARM芯片的寄存器操作了,可以看到申請了中斷函數,后續的讀寫操作就和中斷服務函數密切相關了。
接着open之后,看看上層的write函數調用過程,首先調用了tty核心層的write:
/*
 * VFS write entry point for a tty (tty core layer).
 *
 * Validates the tty and its driver ops, takes a reference on the
 * current line discipline, and hands the user buffer to the
 * discipline's write method through do_tty_write(), which copies the
 * data into kernel space chunk by chunk.  Returns bytes written or a
 * negative errno.
 */
static ssize_t tty_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct tty_struct *tty = file_tty(file);
	struct tty_ldisc *disc;
	ssize_t written;

	if (tty_paranoia_check(tty, inode, "tty_write"))
		return -EIO;
	if (!tty || !tty->ops->write || test_bit(TTY_IO_ERROR, &tty->flags))
		return -EIO;

	/* Short term debug to catch buggy drivers */
	if (tty->ops->write_room == NULL)
		printk(KERN_ERR "tty driver %s lacks a write_room method.\n",
		       tty->driver->name);

	disc = tty_ldisc_ref_wait(tty);
	if (disc->ops->write)
		written = do_tty_write(disc->ops->write, tty, file, buf, count);
	else
		written = -EIO;
	tty_ldisc_deref(disc);

	return written;
}
看這里do_tty_write函數:
/*
 * Chunked write loop (excerpt): repeatedly copy up to `chunk` bytes of
 * the user buffer into the tty's kernel-side write_buf, then pass that
 * kernel buffer to @write — here the line discipline's n_tty_write().
 * Stops on copy fault (-EFAULT) or when the discipline accepts nothing.
 */
static inline ssize_t do_tty_write(
ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
struct tty_struct *tty,
struct file *file,
const char __user *buf,
size_t count)
{
............................
for (;;) {
size_t size = count;
if (size > chunk)
size = chunk;
ret = -EFAULT;
/* User data lands in the kernel-space tty->write_buf here. */
if (copy_from_user(tty->write_buf, buf, size))
break;
ret = write(tty, file, tty->write_buf, size);
if (ret <= 0)
break;
...............................
copy_from_user就把要寫的數據拷貝到了內核空間的write_buf中來,接着write是函數指針,指向ld->ops->write。
這里的ld->ops指向的是n_tty.c 中結構體:
/*
 * The default N_TTY line discipline (n_tty.c).  tty_write() dispatches
 * to .write (n_tty_write), tty_read() to .read (n_tty_read), and the
 * flip-buffer work item delivers received data through .receive_buf.
 */
struct tty_ldisc_ops tty_ldisc_N_TTY = {
.magic = TTY_LDISC_MAGIC,
.name = "n_tty",
.open = n_tty_open,
.close = n_tty_close,
.flush_buffer = n_tty_flush_buffer,
.chars_in_buffer = n_tty_chars_in_buffer,
.read = n_tty_read,
.write = n_tty_write,
.ioctl = n_tty_ioctl,
.set_termios = n_tty_set_termios,
.poll = n_tty_poll,
.receive_buf = n_tty_receive_buf,
.write_wakeup = n_tty_write_wakeup
};
所以調用的是線路規程的write:
/*
 * Line-discipline write (excerpt).  In the raw path shown below the
 * buffer is pushed down to the tty driver's ops->write (uart_write for
 * serial) in a loop, advancing by however many bytes the driver
 * accepted each time; a driver error aborts, a zero return stops.
 */
static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
..........................
b++; nr--;
}
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
} else {
/* Raw path: feed the driver until it accepts everything. */
while (nr > 0) {
c = tty->ops->write(tty, b, nr);
if (c < 0) {
retval = c;
goto break_out;
}
if (!c)
break;
b += c;
nr -= c;
}
}
............................
從線路規程轉到tty驅動層的write:
/*
 * tty-driver write for serial ports (excerpt): stage the data into the
 * per-port circular transmit buffer (state->xmit, UART_XMIT_SIZE, power
 * of two) under the port lock, then call uart_start() to kick the
 * hardware.  Returns the number of bytes queued.
 */
static int uart_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
......................
if (!circ->buf)
return 0;
spin_lock_irqsave(&port->lock, flags);
while (1) {
/* Contiguous free space up to the end of the ring. */
c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
if (count < c)
c = count;
if (c <= 0)
break;
memcpy(circ->buf + circ->head, buf, c);
/* Power-of-two ring: mask instead of modulo. */
circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
buf += c;
count -= c;
ret += c;
}
spin_unlock_irqrestore(&port->lock, flags);
/* Trigger transmission of what was just queued. */
uart_start(tty);
return ret;
}
可以看到,把數據memcpy到環形隊列中來,這樣數據就保存到了該端口對應的state的xmit的buf中,這是一個環形隊列。接着調用uart_start:
/*
 * Kick transmission on the port backing @tty: take the port lock and
 * let __uart_start() decide whether the driver's start_tx() must run.
 */
static void uart_start(struct tty_struct *tty)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *uport = state->uart_port;
	unsigned long irqflags;

	spin_lock_irqsave(&uport->lock, irqflags);
	__uart_start(tty);
	spin_unlock_irqrestore(&uport->lock, irqflags);
}
看來要開始傳輸數據了,所以對這個操作加鎖了:
/*
 * Lockless core of uart_start() (caller holds the port lock): wake the
 * remote side if the driver supports it, then invoke the driver's
 * start_tx() only when the circular xmit buffer holds pending data and
 * the tty is neither software- nor hardware-flow-stopped.
 */
static void __uart_start(struct tty_struct *tty)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *uport = state->uart_port;

	if (uport->ops->wake_peer)
		uport->ops->wake_peer(uport);

	if (uart_circ_empty(&state->xmit) || !state->xmit.buf)
		return;
	if (tty->stopped || tty->hw_stopped)
		return;

	uport->ops->start_tx(uport);
}
最終調用驅動層的操作函數,也就是該端口對應的傳輸函數來觸發數據發送:
/*
 * serial_core ->start_tx() hook: enable the transmit-holding-register
 * interrupt (THRI, IER at offset 0x04) so that sw_uart_irq() /
 * sw_uart_handle_tx() drain the xmit ring.  Idempotent: the register
 * write is skipped when THRI is already set in the shadow copy.
 */
static void sw_uart_start_tx(struct uart_port *port)
{
	struct sw_uart_port *sw_uport = UART_TO_SPORT(port);

	if (sw_uport->ier & SW_UART_IER_THRI)
		return;

	sw_uport->ier |= SW_UART_IER_THRI;
	SERIAL_DBG("start tx, ier %x\n", sw_uport->ier);
	serial_out(port, sw_uport->ier, SW_UART_IER);
}
serial_out函數對應的就是最底層的寄存器操作了,這里#define SW_UART_IER (0x04),SW_UART_IER_THRI它對應的是使能中斷,具體要參考全志的A20 CPU手冊:
/*
 * 8-bit UART register write: @offs is the byte offset of the register
 * (e.g. SW_UART_IER = 0x04) from the port's mapped base address.
 */
static inline void serial_out(struct uart_port *port, unsigned char value, int offs)
{
__raw_writeb(value, port->membase + offs);
}
把配置寫到了寄存器中,使能中斷后,中斷服務函數就自動把buf的數據發送出去了,前面分析看到數據是memcpy到了該端口對應的state的circ_buf結構體中的,所以進入前面open時候分析的中斷服務函數中去:
/*
 * Shared interrupt handler for one port (registered in
 * sw_uart_startup()).  Under the port lock it reads the interrupt id
 * (IIR) and line status (LSR), then dispatches: the controller's
 * "busy" id gets a workaround (read USR, rewrite LCR), otherwise rx
 * data/break goes to sw_uart_handle_rx(), modem status is sampled, and
 * a THRE condition drains the xmit ring via sw_uart_handle_tx().
 */
static irqreturn_t sw_uart_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
struct sw_uart_port *sw_uport = UART_TO_SPORT(port);
unsigned int iir = 0, lsr = 0;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
iir = serial_in(port, SW_UART_IIR) & SW_UART_IIR_IID_MASK;
lsr = serial_in(port, SW_UART_LSR);
SERIAL_DBG("irq: iir %x lsr %x\n", iir, lsr);
if (iir == SW_UART_IIR_IID_BUSBSY) {
/* handle busy */
// SERIAL_MSG("uart%d busy...\n", sw_uport->id);
/* Reading USR clears the busy condition on this controller. */
serial_in(port, SW_UART_USR);
#ifdef CONFIG_SW_UART_FORCE_LCR
sw_uart_force_lcr(sw_uport, 10);
#else
serial_out(port, sw_uport->lcr, SW_UART_LCR);
#endif
} else {
/* Data ready or break: pull bytes into the tty flip buffer. */
if (lsr & (SW_UART_LSR_DR | SW_UART_LSR_BI))
lsr = sw_uart_handle_rx(sw_uport, lsr);
sw_uart_modem_status(sw_uport);
/* In PTIME mode LSR.THRE is not set; check the IIR id instead. */
#ifdef CONFIG_SW_UART_PTIME_MODE
if (iir == SW_UART_IIR_IID_THREMP)
#else
if (lsr & SW_UART_LSR_THRE)
#endif
sw_uart_handle_tx(sw_uport);
}
spin_unlock_irqrestore(&port->lock, flags);
return IRQ_HANDLED;
}
我們要做的是發送操作,所以進入sw_uart_handle_tx:
/*
 * Transmit-side interrupt work (excerpt — the tail of the function is
 * not shown).  Sends a pending flow-control x_char first, stops the
 * transmitter when the ring is empty or tx is stopped, and otherwise
 * writes up to half a FIFO (fifosize / 2) of bytes from the circular
 * xmit buffer to the THR register, then wakes writers when the ring
 * drains below WAKEUP_CHARS.
 */
static void sw_uart_handle_tx(struct sw_uart_port *sw_uport)
{
struct circ_buf *xmit = &sw_uport->port.state->xmit;
int count;
if (sw_uport->port.x_char) {
serial_out(&sw_uport->port, sw_uport->port.x_char, SW_UART_THR);
sw_uport->port.icount.tx++;
sw_uport->port.x_char = 0;
#ifdef CONFIG_SW_UART_DUMP_DATA
/* NOTE(review): x_char was just zeroed above, so this records 0,
 * not the character actually sent — looks like a bug. */
sw_uport->dump_buff[sw_uport->dump_len++] = sw_uport->port.x_char;
SERIAL_DUMP(sw_uport, "Tx");
#endif
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&sw_uport->port)) {
sw_uart_stop_tx(&sw_uport->port);
return;
}
/* Batch at most half the FIFO per interrupt. */
count = sw_uport->port.fifosize / 2;
do {
#ifdef CONFIG_SW_UART_DUMP_DATA
sw_uport->dump_buff[sw_uport->dump_len++] = xmit->buf[xmit->tail];
#endif
serial_out(&sw_uport->port, xmit->buf[xmit->tail], SW_UART_THR);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
sw_uport->port.icount.tx++;
if (uart_circ_empty(xmit)) {
break;
}
} while (--count > 0);
SERIAL_DUMP(sw_uport, "Tx");
/* Drop the lock while waking up writers (re-entry safety). */
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) {
spin_unlock(&sw_uport->port.lock);
uart_write_wakeup(&sw_uport->port);
spin_lock(&sw_uport->port.lock);
}
..........................
看到了,serial_out果然是取出circ_buf的buf數據,在do{}while語句中完成發送:
/*
 * (Same helper listed again in the article.)  8-bit UART register
 * write at byte offset @offs from the port's mapped base; the hardware
 * transmits bytes written to THR on its own.
 */
static inline void serial_out(struct uart_port *port, unsigned char value, int offs)
{
__raw_writeb(value, port->membase + offs);
}
這樣就把發送的數據寫到了相應的寄存器中,硬件會自動完成數據發送操作
而上層read操作時候調用和write差不多,不同之處在於read是讀取一個環形buf的數據,因為數據到了會產生中斷,是中斷服務函數自動接收數據並把它存儲在buf中的;而前面寫的時候是主動把數據寫到buf去,所以先從中斷服務函數中看是如何接收輸入的:
/*
 * Receive-side interrupt work (excerpt): while LSR reports data ready
 * (bounded by max_count), read one byte from RBR, let the sysrq hook
 * inspect it, and push it into the tty flip buffer via
 * uart_insert_char() with overrun accounting.
 */
static unsigned int sw_uart_handle_rx(struct sw_uart_port *sw_uport, unsigned int lsr)
{
struct tty_struct *tty = sw_uport->port.state->port.tty;
unsigned char ch = 0;
int max_count = 256;
char flag;
do {
if (likely(lsr & SW_UART_LSR_DR)) {
ch = serial_in(&sw_uport->port, SW_UART_RBR);
..........................
if (uart_handle_sysrq_char(&sw_uport->port, ch))
goto ignore_char;
/* Hand the byte up to the tty layer's flip buffer. */
uart_insert_char(&sw_uport->port, lsr, SW_UART_LSR_OE, ch, flag);
............................
從寄存器讀取字符后賦值給ch, 接着uart_insert_char來處理該字符,其實就是把這個數據放到uart層去:
/*
 * Push one received character (with its status flag) from a low-level
 * driver into the tty flip buffer, honouring the port's
 * ignore_status_mask.  An overrun is reported separately as a
 * TTY_OVERRUN marker character.
 */
void uart_insert_char(struct uart_port *port, unsigned int status,
unsigned int overrun, unsigned int ch, unsigned int flag)
{
	struct tty_struct *tty = port->state->port.tty;
	unsigned int ignored = status & port->ignore_status_mask;

	if ((ignored & ~overrun) == 0)
		tty_insert_flip_char(tty, ch, flag);

	/*
	 * Overrun is special. Since it's reported immediately,
	 * it doesn't affect the current character.
	 */
	if (status & ~port->ignore_status_mask & overrun)
		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
/*
 * Fast path for queueing one byte + flag into the tty's current flip
 * buffer.  Falls back to tty_insert_flip_string_flags() — which
 * obtains a fresh tty_buffer — when the tail buffer is absent or full.
 * Returns the number of characters queued (0 or 1).
 */
static inline int tty_insert_flip_char(struct tty_struct *tty,
unsigned char ch, char flag)
{
	struct tty_buffer *tail = tty->buf.tail;

	if (!tail || tail->used >= tail->size)
		return tty_insert_flip_string_flags(tty, &ch, &flag, 1);

	tail->flag_buf_ptr[tail->used] = flag;
	tail->char_buf_ptr[tail->used++] = ch;
	return 1;
}
當前的tty_buffer空間不夠時調用tty_insert_flip_string_flags,在這個函數里會去查找下一個tty_buffer,並將數據放到下一個tty_buffer的char_buf_ptr里。
這里char_buf_ptr的數據是如何放到線路規程的read_buf中的呢?那是在tty open操作的時候,tty_init_dev -> initialize_tty_struct -> tty_buffer_init:
/*
 * Set up the per-tty receive buffering state at tty creation time:
 * no buffers queued yet, zero accounted memory, and the
 * flush_to_ldisc() work item that later pushes received data up to
 * the line discipline.
 */
void tty_buffer_init(struct tty_struct *tty)
{
	spin_lock_init(&tty->buf.lock);
	tty->buf.head = tty->buf.tail = tty->buf.free = NULL;
	tty->buf.memory_used = 0;
	INIT_WORK(&tty->buf.work, flush_to_ldisc);
}
可以看到初始化了工作隊列的,而調用工作隊列的時機是在這里操作完成后,繼續sw_uart_handle_rx函數的tty_flip_buffer_push時候:
void tty_flip_buffer_push(struct tty_struct *tty)
{
unsigned long flags;
spin_lock_irqsave(&tty->buf.lock, flags);
if (tty->buf.tail != NULL)
tty->buf.tail->commit = tty->buf.tail->used;
spin_unlock_irqrestore(&tty->buf.lock, flags);
if (tty->low_latency)
flush_to_ldisc(&tty->buf.work);
else
schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
這里就有兩種方法把數據上報給鏈路規程層,其實都差不多,這樣數據就上報到了鏈路規程中,看看這個工作隊列函數:
/*
 * Deferred-work drain of the flip buffers (excerpt): take up to the
 * discipline's advertised receive_room characters (plus flags) from
 * the head tty_buffer and feed them to disc->ops->receive_buf —
 * n_tty_receive_buf() for the default discipline.
 */
static void flush_to_ldisc(struct work_struct *work)
{
..........................
count = tty->receive_room;
char_buf = head->char_buf_ptr + head->read;
flag_buf = head->flag_buf_ptr + head->read;
head->read += count;
spin_unlock_irqrestore(&tty->buf.lock, flags);
/* Hand the characters to the line discipline. */
disc->ops->receive_buf(tty, char_buf,
flag_buf, count);
............................
鏈路規程的receive_buf函數:
/*
 * Line-discipline receive path (excerpt — elided and partly
 * unbalanced as printed in the article).  In the raw path shown, the
 * incoming characters are memcpy'ed into the discipline's read_buf
 * ring (N_TTY_BUF_SIZE, power of two), from which n_tty_read() later
 * copies to user space.
 */
static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count)
{
const unsigned char *p;
char *f, flags = TTY_NORMAL;
int i;
char buf[64];
unsigned long cpuflags;
if (!tty->read_buf)
return;
................................
memcpy(tty->read_buf + tty->read_head, cp, i);
tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
tty->read_cnt += i;
...........................
}
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
}
/* Recompute how much more the discipline can accept. */
n_tty_set_room(tty);
現在,再回頭從上層看read函數是如何讀取數據的,流程也是tty核心 ->鏈路規程 :
/*
 * Line-discipline read (excerpt): in the non-canonical path the data
 * is pulled out of tty->read_buf with copy_from_read_buf() — called
 * twice because the ring may wrap, yielding two contiguous runs.
 */
static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
unsigned char __user *buf, size_t nr)
{
unsigned char __user *b = buf;
.......................................
c = tty->read_buf[tty->read_tail];
...............................
/* Two calls: the ring may wrap, giving two contiguous chunks. */
uncopied = copy_from_read_buf(tty, &b, &nr);
uncopied += copy_from_read_buf(tty, &b, &nr);
..............................
/*
 * Move data from the tty's read_buf ring to user space.  Copies at
 * most one contiguous run (up to the ring wrap), which is why
 * n_tty_read() calls it twice.  Advances *b and *nr by the number of
 * bytes delivered and returns the number of bytes copy_to_user()
 * could NOT transfer (0 on full success).
 */
static int copy_from_read_buf(struct tty_struct *tty,
unsigned char __user **b,
size_t *nr)
{
	int uncopied = 0;
	size_t chunk;
	unsigned long flags;

	/* Snapshot the contiguous run available before the ring wraps. */
	spin_lock_irqsave(&tty->read_lock, flags);
	chunk = min(tty->read_cnt, N_TTY_BUF_SIZE - tty->read_tail);
	chunk = min(*nr, chunk);
	spin_unlock_irqrestore(&tty->read_lock, flags);

	if (!chunk)
		return 0;

	uncopied = copy_to_user(*b, &tty->read_buf[tty->read_tail], chunk);
	chunk -= uncopied;	/* bytes actually delivered to user space */
	tty_audit_add_data(tty, &tty->read_buf[tty->read_tail], chunk);

	spin_lock_irqsave(&tty->read_lock, flags);
	tty->read_tail = (tty->read_tail + chunk) & (N_TTY_BUF_SIZE-1);
	tty->read_cnt -= chunk;
	/* Turn single EOF into zero-length read */
	if (L_EXTPROC(tty) && tty->icanon && chunk == 1) {
		if (!tty->read_cnt && (*b)[chunk-1] == EOF_CHAR(tty))
			chunk--;
	}
	spin_unlock_irqrestore(&tty->read_lock, flags);

	*b += chunk;
	*nr -= chunk;

	return uncopied;
}
看到了copy_to_user函數,就是把read_buf的數據拷貝到了用戶空間。
到這里,串口的讀寫流程就很清楚,一目了然了
