1 简介
ppp协议(点到点协议),在拨号网络中应用比较广泛,逐渐在替代slip协议。
ppp数据包格式为:| 协议码 | 载荷 |填充符
ppp主要有四类协议码:
1 0x0001 - 0x3fff 网络层协议(ipv4,ipv6,ipx,appletalk)
2 0x4001 - 0x7fff 无网络层协议参与的小载荷量传输(低流量)
3 0x8001 - 0xbfff 用于配置网络层的子协议(网络控制协议,如ipcp)
4 0xc001 - 0xffff 用于建立ppp连接的子协议(链路层控制协议,如lcp,pap,chap)
2 ppp协议实现的结构
网络应用程序
ip/tcp协议栈
ppp网络协议 -|
|—— ppp内核实现
ppp连线规程 -|
调制解调器
pppd 是应用进程,通过字符设备和ppp内核通信。
3 ppp收发数据过程
a 发送数据
ppp 协议发送数据有两种途径:一种是网络协议栈发送 ,另一种是pppd直接发送控制协议数据
b 接收数据
ppp连线规程接收到数据后,根据数据类型:如果是协商协议数据,则放到字符设备队列等待pppd读取;如果是网络层数据,则调用netif_rx入网络栈。
4 代码分析
a 初始化ppp设备,ppp字符主设备号为108
/*
 * Entry points of the /dev/ppp character device.  The pppd user-space
 * daemon talks to the kernel PPP core through these operations
 * (read/write control frames, poll, ioctl).
 */
static struct file_operations ppp_device_fops = {
.owner = THIS_MODULE,
.read = ppp_read,
.write = ppp_write,
.poll = ppp_poll,
.ioctl = ppp_ioctl,
.open = ppp_open,
.release = ppp_release
};
/* Fixed major number of the /dev/ppp character device. */
#define PPP_MAJOR 108

/*
 * Module initialization: register the "ppp" character device, then create
 * a device class and class-device so the udev daemon automatically creates
 * /dev/ppp.  Returns 0 on success or a negative errno.
 */
static int __init ppp_init(void)
{
int err;
/* Register the character device; the device name is "ppp". */
err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
if (!err) {
/* Create the device class so udev can create the /dev node. */
ppp_class = class_create(THIS_MODULE, "ppp");
if (IS_ERR(ppp_class)) {
err = PTR_ERR(ppp_class);
goto out_chrdev;
}
/* NOTE(review): the return value of class_device_create() is not
 * checked here; a failure only costs the /dev/ppp udev node. */
class_device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
}
out:
if (err)
printk(KERN_ERR "failed to register PPP device (%d)\n", err);
return err;
out_chrdev:
/* class_create() failed: undo the chrdev registration, then report. */
unregister_chrdev(PPP_MAJOR, "ppp");
goto out;
}
class_create用途是做什么的?
从linux内核2.6的某个版本之后,devfs不复存在,udev成为devfs的替代;udev是运行在应用层的守护程序。
加入对udev的支持很简单,在驱动初始化的代码里调用class_create为该设备创建一个class,再为每个设备调用class_device_create创建对应的设备。大致用法如下:
struct class *myclass = class_create(THIS_MODULE, "my_device_driver");
class_device_create(myclass, NULL, MKDEV(major_num, 0), NULL, "my_device");
这样的module被加载时,udev daemon就会自动在/dev下创建my_device设备文件
/* 字符设备ppp的file结构:
* ---------------------
* | .... |
*----------------------
* | f_op |
*----------------------
* | .... |
*----------------------
* | private_data |----------> struct ppp_file
*----------------------
*/
/*
 * Common per-open state for both kinds of objects reachable through
 * /dev/ppp: a PPP unit (network interface) or a single channel.
 * file->private_data points at one of these, embedded at the start of
 * either struct ppp or struct channel.
 */
struct ppp_file {
enum {
INTERFACE=1, CHANNEL
} kind; /* which containing object this is embedded in */
struct sk_buff_head xq; /* pppd transmit queue */
struct sk_buff_head rq; /* receive queue for pppd */
wait_queue_head_t rwait; /* for poll on reading /dev/ppp */
atomic_t refcnt; /* # refs (incl /dev/ppp attached) */
int hdrlen; /* space to leave for headers */
int index; /* interface unit / channel number */
int dead; /* unit/channel has been shut down */
};
/*
 * State of one PPP unit (the network-interface side).  Only the first two
 * members are shown; the rest is elided ("...") in this excerpt.
 * The trailing numbers in the comments (0, 48) look like byte offsets of
 * the members within the struct — TODO confirm against ppp_generic.c.
 */
struct ppp {
struct ppp_file file; /* stuff for read/write/poll 0 */
struct file *owner; /* file that owns this unit 48 */
...
};
/*
 * Internal bookkeeping for one PPP channel (one low-level link, e.g. a
 * serial line discipline).  Wraps the public struct ppp_channel and links
 * the channel to the PPP unit it is attached to.  Elided ("...") in this
 * excerpt.
 */
struct channel {
struct ppp_file file; /* stuff for read/write/poll */
struct list_head list; /* link in all/new_channels list */
struct ppp_channel *chan; /* public channel data structure */
struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */
spinlock_t downl; /* protects `chan', file.xq dequeue */
struct ppp *ppp; /* ppp unit we're connected to */
struct list_head clist; /* link in list of channels per unit */
rwlock_t upl; /* protects `ppp' */
...
};
/*
 * Public interface between the generic PPP layer and a channel driver
 * (presumably a tty line discipline or similar low-level transport —
 * the driver side is not shown in this excerpt).
 */
struct ppp_channel {
void *private; /* channel private data */
struct ppp_channel_ops *ops; /* operations for this channel */
int mtu; /* max transmit packet size */
int hdrlen; /* amount of headroom channel needs */
void *ppp; /* opaque to channel */
/* the following are not used at present */
int speed; /* transfer rate (bytes/second) */
int latency; /* overhead time in milliseconds */
};
/*
 * NOTE(review): despite following the ppp_device_fops table above, this
 * function takes a struct net_device, not (inode, file) — it looks like
 * the net_device open routine from the generic-HDLC PPP code rather than
 * the /dev/ppp character-device open.  Confirm which source file this
 * excerpt was taken from.
 *
 * Attaches the syncppp layer to an HDLC device and brings the link up.
 */
static int ppp_open(struct net_device *dev)
{
/* dev_to_hdlc() converts the net_device to its embedded hdlc_device;
 * nothing is allocated here (original comment said "allocate"). */
hdlc_device *hdlc = dev_to_hdlc(dev);
void *old_ioctl;
int result;
dev->priv = &hdlc->state.ppp.syncppp_ptr;
hdlc->state.ppp.syncppp_ptr = &hdlc->state.ppp.pppdev;
hdlc->state.ppp.pppdev.dev = dev;
/* Save the ioctl handler: sppp_attach() overwrites it below. */
old_ioctl = dev->do_ioctl;
hdlc->state.ppp.old_change_mtu = dev->change_mtu;
sppp_attach(&hdlc->state.ppp.pppdev);
/* sppp_attach nukes them. We don't need syncppp's ioctl */
dev->do_ioctl = old_ioctl;
hdlc->state.ppp.pppdev.sppp.pp_flags &= ~PP_CISCO;
dev->type = ARPHRD_PPP;
result = sppp_open(dev);
if (result) {
/* Opening failed: undo the attach before reporting the error. */
sppp_detach(dev);
return result;
}
return 0;
}
/*
 * read() handler for /dev/ppp: hand one queued packet to pppd.
 * Blocks (unless O_NONBLOCK) until a packet arrives on pf->rq, the
 * unit/channel is shut down, or a signal becomes pending.
 * Returns the packet length, 0 on EOF, or a negative errno.
 */
static ssize_t ppp_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct ppp_file *pf = file->private_data;
/* Wait-queue entry used to sleep on pf->rwait while rq is empty. */
DECLARE_WAITQUEUE(wait, current);
ssize_t ret;
struct sk_buff *skb = NULL;
ret = count;
if (pf == 0)
return -ENXIO;
add_wait_queue(&pf->rwait, &wait);
for (;;) {
/* Mark ourselves sleeping BEFORE checking the queue, so a wake-up
 * arriving between the check and schedule() is not lost. */
set_current_state(TASK_INTERRUPTIBLE);
skb = skb_dequeue(&pf->rq); /* try to take a received packet */
if (skb)
break;
/* Queue is empty: decide whether to sleep or bail out. */
ret = 0;
if (pf->dead)
break;
if (pf->kind == INTERFACE) { /* fd is bound to a PPP unit */
/*
 * Return 0 (EOF) on an interface that has no
 * channels connected, unless it is looping
 * network traffic (demand mode).
 */
struct ppp *ppp = PF_TO_PPP(pf);
if (ppp->n_channels == 0
&& (ppp->flags & SC_LOOP_TRAFFIC) == 0)
break;
}
ret = -EAGAIN;
if (file->f_flags & O_NONBLOCK)
break;
ret = -ERESTARTSYS;
/* A pending signal aborts the wait (original comment claimed this
 * "sets pending"; signal_pending() only checks). */
if (signal_pending(current))
break;
/* Sleep until woken via pf->rwait, then re-check the queue. */
schedule();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&pf->rwait, &wait);
if (skb == 0)
goto out;
ret = -EOVERFLOW;
if (skb->len > count) /* user buffer too small for this packet */
goto outf;
ret = -EFAULT;
/* Copy the packet payload to user space. */
if (copy_to_user(buf, skb->data, skb->len))
goto outf;
ret = skb->len;
outf:
kfree_skb(skb);
out:
return ret;
}
/*
 * write() handler for /dev/ppp: send one packet supplied by pppd.
 * Copies the user buffer into a freshly allocated sk_buff (leaving
 * pf->hdrlen bytes of headroom for encapsulation headers), queues it on
 * the transmit queue and kicks the appropriate transmit path.
 * Returns the number of bytes consumed, or a negative errno.
 */
static ssize_t ppp_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct ppp_file *pf = file->private_data;
struct sk_buff *pkt;

if (pf == 0)
return -ENXIO;

/* Allocate the packet with room for headers in front of the payload. */
pkt = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
if (pkt == 0)
return -ENOMEM;
skb_reserve(pkt, pf->hdrlen);

/* Pull the payload in from user space. */
if (copy_from_user(skb_put(pkt, count), buf, count)) {
kfree_skb(pkt);
return -EFAULT;
}

skb_queue_tail(&pf->xq, pkt);
switch (pf->kind) {
case INTERFACE:
/* send through the network-interface transmit path */
ppp_xmit_process(PF_TO_PPP(pf));
break;
case CHANNEL:
/* push directly to the attached channel (line discipline) */
ppp_channel_push(PF_TO_CHANNEL(pf));
break;
}

return count;
}
阅读(768) | 评论(0) | 转发(0) |